emergenz committed
Commit 8bec397 · 1 Parent(s): 7d82646

quickfix: newline
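For context: in the hunk below, recording event 908 had been appended directly to the end of event 638's row (the old row ends with `terminal_output908,7635533,"genie.py",...`) because the terminating newline was missing; this commit splits it onto its own CSV row. A minimal sketch of that kind of repair, assuming a hypothetical standalone script (`split_fused_rows` is not part of the dataset tooling):

```python
import re

# A fused row looks like ...",,terminal_output908,7635533,... i.e. the next
# event's sequence number follows immediately after the previous row's
# trailing "terminal_output" type field instead of starting a new line.
FUSED = re.compile(r'(,,terminal_output)(\d+,)')

def split_fused_rows(path_in: str, path_out: str) -> None:
    """Insert the missing newline between two events written to one CSV row."""
    with open(path_in, encoding="utf-8") as src, \
         open(path_out, "w", encoding="utf-8") as dst:
        for line in src:
            dst.write(FUSED.sub(r"\1\n\2", line))

# Hypothetical usage (paths are placeholders):
# split_fused_rows("source.csv", "source_fixed.csv")
```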

1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-2f4ff312-abac-4732-805a-ec6c245e532e1752745206273-2025_07_17-11.40.45.491/source.csv CHANGED
@@ -539,7 +539,8 @@ Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
  635,4004450,"TERMINAL",0,0,"/",,terminal_output
  636,4004761,"TERMINAL",0,0,"train_dynamics_lr_1e-4_larger_ffn_3353271.log",,terminal_output
  637,4005221,"TERMINAL",0,0,"train_dynamics_lr_1e-4_larger_ffn_3353271.log\r\n[?2004l\r[?1049h[?1h=\r#!/usr/bin/env bash\r\n\r\n#SBATCH --nodes=2\r\n#SBATCH --ntasks-per-node=4\r\n#SBATCH --time=24:00:00\r\n#SBATCH --partition=accelerated\r\n#SBATCH --cpus-per-task=5\r\n#SBATCH --gres=gpu:4\r\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_franz/%x_%j.log\r\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_franz/%x_%j.log\r\n#SBATCH --job-name=train_dynamics_lr_1e-4_larger_ffn\r\n\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\nsource .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrec ords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/dynamics-cotraini ng-modelsize-scaling/$job_name\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr- scaling/train_tokenizer_lr_sweep_1e-4\r\n\r\nenv | grep SLURM\r\n\r\nsrun python train_dynamics.py \\r\n --save_ckpt \\r\n --ckpt_dir $CHECKPOINT_DIR \\r\n --batch_size=96 \\r\n --tokenizer_ffn_dim=512 \\r\n --tokenizer_num_blocks=8 \\r\n --lam_ffn_dim=2048 \\r\n --lam_num_blocks=4 \\r\n --dyna_ffn_dim=2048 \\r\n --dyna_num_blocks=6 \\r\n --init_lr=0 \\r\n --max_lr=1.5e-4 \\r\n --log_image_interval=1000 \\r\n --log \\r\n --log_checkpoint_interval=1000 \\r\n --name=dynamics-lr-1e-4-larger-ffn \\r\n --tags dynamics lr-1e-4 larger-ffn mixed-precision flash-attention \\r\n --entity instant-uv \\r\n --project jafar \\r\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\r\n --data_dir $array_records_dir \\r\nSLURM_JOB_USER=tum_dbd0378\r\nSLURM_TASKS_PER_NODE=4(x2)\r\nSLURM_JOB_UID=996262\r\nSLURM_TASK_PID=2771207\r\nSLURM_JOB_GPUS=0,1,2,3\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar\r\nSLURMD_NODENAME=hkn0516\r\nSLURM_JOB_START_TIME=1752748479\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1752834879\r\nSLURM_CPUS_ON_NODE=24\r\nSLURM_JOB_CPUS_PER_NODE=24(x2)\r\nSLURM_GPUS_ON_NODE=4\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=2\r\nSLURM_JOBID=3353271\r\nSLURM_JOB_QOS=normal\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=8\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e10.hkn0516\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_NODELIST=hkn[0516,0518]\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=8\r\nSLURM_NNODES=2\r\nSLURM_SUBMIT_HOST=hkn1990.localdomain\r\nSLURM_JOB_ID=3353271\r\nSLURM_NODEID=0\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=train_dynamics_lr_1e-4_larger_ffn\r\n/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_franz/train_dynamics_lr_1e-4_larger_ffn_3353271.log",,terminal_output
- 638,4006127,"TERMINAL",0,0,"\rSLURM_NTASKS_PER_NODE=4\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn[0516,0518]\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\n/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py: 347: UserWarning: The field `param-dtype` is annotated with type `<class 'numpy.dtype'>`, but the default val ue `<class 'jax.numpy.float32'>` has type `<class 'jax._src.numpy.scalar_types._ScalarMeta'>`. We'll try to h andle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py: 347: UserWarning: The field `param-dtype` is annotated with type `<class 'numpy.dtype'>`, but the default val ue `<class 'jax.numpy.float32'>` has type `<class 'jax._src.numpy.scalar_types._ScalarMeta'>`. We'll try to h andle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py: 347: UserWarning: The field `param-dtype` is annotated with type `<class 'numpy.dtype'>`, but the default val ue `<class 'jax.numpy.float32'>` has type `<class 'jax._src.numpy.scalar_types._ScalarMeta'>`. We'll try to h andle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py: 347: UserWarning: The field `param-dtype` is annotated with type `<class 'numpy.dtype'>`, but the default val ue `<class 'jax.numpy.float32'>` has type `<class 'jax._src.numpy.scalar_types._ScalarMeta'>`. We'll try to h andle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py: 347: UserWarning: The field `dtype` is annotated with type `<class 'numpy.dtype'>`, but the default value `<c lass 'jax.numpy.bfloat16'>` has type `<class 'jax._src.numpy.scalar_types._ScalarMeta'>`. We'll try to handle  this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py: 347: UserWarning: The field `param-dtype` is annotated with type `<class 'numpy.dtype'>`, but the default val ue `<class 'jax.numpy.float32'>` has type `<class 'jax._src.numpy.scalar_types._ScalarMeta'>`. We'll try to h andle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py: 347: UserWarning: The field `dtype` is annotated with type `<class 'numpy.dtype'>`, but the default value `<c lass 'jax.numpy.bfloat16'>` has type `<class 'jax._src.numpy.scalar_types._ScalarMeta'>`. We'll try to handle  this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py: 347: UserWarning: The field `param-dtype` is annotated with type `<class 'numpy.dtype'>`, but the default val ue `<class 'jax.numpy.float32'>` has type `<class 'jax._src.numpy.scalar_types._ScalarMeta'>`. 
We'll try to h andle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py: 347: UserWarning: The field `dtype` is annotated with type `<class 'numpy.dtype'>`, but the default value `<c :",,terminal_output908,7635533,"genie.py",0,0,"",python,tab
+ 638,4006127,"TERMINAL",0,0,"\rSLURM_NTASKS_PER_NODE=4\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn[0516,0518]\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\n/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py: 347: UserWarning: The field `param-dtype` is annotated with type `<class 'numpy.dtype'>`, but the default val ue `<class 'jax.numpy.float32'>` has type `<class 'jax._src.numpy.scalar_types._ScalarMeta'>`. We'll try to h andle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py: 347: UserWarning: The field `param-dtype` is annotated with type `<class 'numpy.dtype'>`, but the default val ue `<class 'jax.numpy.float32'>` has type `<class 'jax._src.numpy.scalar_types._ScalarMeta'>`. We'll try to h andle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py: 347: UserWarning: The field `param-dtype` is annotated with type `<class 'numpy.dtype'>`, but the default val ue `<class 'jax.numpy.float32'>` has type `<class 'jax._src.numpy.scalar_types._ScalarMeta'>`. We'll try to h andle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py: 347: UserWarning: The field `param-dtype` is annotated with type `<class 'numpy.dtype'>`, but the default val ue `<class 'jax.numpy.float32'>` has type `<class 'jax._src.numpy.scalar_types._ScalarMeta'>`. We'll try to h andle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py: 347: UserWarning: The field `dtype` is annotated with type `<class 'numpy.dtype'>`, but the default value `<c lass 'jax.numpy.bfloat16'>` has type `<class 'jax._src.numpy.scalar_types._ScalarMeta'>`. We'll try to handle  this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py: 347: UserWarning: The field `param-dtype` is annotated with type `<class 'numpy.dtype'>`, but the default val ue `<class 'jax.numpy.float32'>` has type `<class 'jax._src.numpy.scalar_types._ScalarMeta'>`. We'll try to h andle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py: 347: UserWarning: The field `dtype` is annotated with type `<class 'numpy.dtype'>`, but the default value `<c lass 'jax.numpy.bfloat16'>` has type `<class 'jax._src.numpy.scalar_types._ScalarMeta'>`. We'll try to handle  this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py: 347: UserWarning: The field `param-dtype` is annotated with type `<class 'numpy.dtype'>`, but the default val ue `<class 'jax.numpy.float32'>` has type `<class 'jax._src.numpy.scalar_types._ScalarMeta'>`. 
We'll try to h andle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py: 347: UserWarning: The field `dtype` is annotated with type `<class 'numpy.dtype'>`, but the default value `<c :",,terminal_output
+ 908,7635533,"genie.py",0,0,"",python,tab
  909,7639083,"utils/nn.py",0,0,"import math\nfrom typing import Tuple\n\nfrom flax import linen as nn\nimport jax\nimport jax.numpy as jnp\nimport einops\n\n\nclass PositionalEncoding(nn.Module):\n """"""https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/JAX/tutorial6/Transformers_and_MHAttention.html""""""\n\n d_model: int # Hidden dimensionality of the input.\n max_len: int = 5000 # Maximum length of a sequence to expect.\n\n def setup(self):\n # Create matrix of [SeqLen, HiddenDim] representing the positional encoding for max_len inputs\n self.pe = jnp.zeros((self.max_len, self.d_model))\n position = jnp.arange(0, self.max_len, dtype=jnp.float32)[:, None]\n div_term = jnp.exp(\n jnp.arange(0, self.d_model, 2) * (-math.log(10000.0) / self.d_model)\n )\n self.pe = self.pe.at[:, 0::2].set(jnp.sin(position * div_term))\n self.pe = self.pe.at[:, 1::2].set(jnp.cos(position * div_term))\n\n def __call__(self, x):\n x = x + self.pe[: x.shape[2]]\n return x\n\n\nclass STBlock(nn.Module):\n dim: int\n ffn_dim: int\n num_heads: int\n dropout: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n use_flash_attention: bool\n\n @nn.remat\n @nn.compact\n def __call__(self, x: jax.Array) -> jax.Array:\n # --- Spatial attention ---\n z = PositionalEncoding(self.dim)(x)\n z = nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n z = nn.MultiHeadAttention(\n num_heads=self.num_heads,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(self.use_flash_attention, is_causal=False),\n )(z)\n x = x + z\n\n # --- Temporal attention ---\n x = x.swapaxes(1, 2)\n z = PositionalEncoding(self.dim)(x)\n z = nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n causal_mask = jnp.tri(z.shape[-2])\n z = nn.MultiHeadAttention(\n num_heads=self.num_heads,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(self.use_flash_attention, is_causal=True),\n # FIXME (f.srambical): check whether we should still pass the mask if we set is_causal=True\n )(z, mask=causal_mask)\n x = x + z\n x = x.swapaxes(1, 2)\n\n # --- Feedforward ---\n z = nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(x)\n # FIXME (f.srambical): Here, the attention hidden dimension is the same as the FFN's. 
Usually, FFN hidden dimension is 4x model_dim\n z = nn.Dense(\n self.ffn_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n z = nn.gelu(z)\n z = nn.Dense(\n self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n x = x + z\n\n return x\n\n\nclass STTransformer(nn.Module):\n model_dim: int\n ffn_dim: int\n out_dim: int\n num_blocks: int\n num_heads: int\n dropout: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n use_flash_attention: bool\n\n @nn.compact\n def __call__(self, x: jax.Array) -> jax.Array:\n x = nn.Sequential(\n [\n nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n ),\n nn.Dense(self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n ),\n nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n ),\n ]\n )(x)\n for _ in range(self.num_blocks):\n x = STBlock(\n dim=self.model_dim,\n ffn_dim=self.ffn_dim,\n num_heads=self.num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n )(x)\n x = nn.Dense(\n self.out_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(x)\n return x # (B, T, E)\n\n\ndef normalize(x):\n return x / (jnp.linalg.norm(x, ord=2, axis=-1, keepdims=True) + 1e-8)\n\n\nclass VectorQuantizer(nn.Module):\n latent_dim: int\n num_latents: int\n dropout: float\n\n def setup(self):\n self.codebook = normalize(\n self.param(\n ""codebook"",\n nn.initializers.lecun_uniform(),\n (self.num_latents, self.latent_dim),\n )\n )\n self.drop = nn.Dropout(self.dropout, deterministic=False)\n\n def __call__(\n self, x: jax.Array, training: bool\n ) -> Tuple[jax.Array, jax.Array, jax.Array, jax.Array]:\n # --- Compute distances ---\n x = normalize(x)\n codebook = normalize(self.codebook)\n distance = -jnp.matmul(x, codebook.T)\n if training:\n dropout_key = self.make_rng(""dropout"")\n distance = self.drop(distance, rng=dropout_key)\n\n # --- Get indices and embeddings ---\n indices = jnp.argmin(distance, axis=-1)\n z = self.codebook[indices]\n\n # --- Straight through estimator ---\n z_q = x + jax.lax.stop_gradient(z - x)\n return z_q, z, x, indices\n\n def get_codes(self, indices: jax.Array):\n return self.codebook[indices]\n\n\ndef _create_flash_attention_fn(use_flash_attention: bool, is_causal: bool):\n """"""\n Create an attention function that uses flash attention if enabled.\n\n Flax MultiHeadAttention provides tensors with shape (batch..., length, num_heads, head_dim)\n jax.nn.dot_product_attention expects (batch, length, num_heads, head_dim).\n\n We need to reshape to ensure compatibility. cuDNN's flash attention additionally\n requires a sequence length that is a multiple of 4. We pad the sequence length to the nearest\n multiple of 4 and mask accordingly.\n """"""\n \n def attention_fn(query, key, value, bias=None, mask=None, **kwargs):\n implementation = 'cudnn' if use_flash_attention else None\n\n def _rearrange(x):\n return einops.rearrange(x, '... l h d -> (...) 
l h d')\n def _pad(x):\n return jnp.pad(x, ((0, 0), (0, pad_size), (0, 0), (0, 0)))\n def _fuse_masks(mask: jax.Array, attention_mask: jax.Array) -> jax.Array:\n mask_bool = mask.astype(jnp.bool_)\n expanded_mask = jnp.pad(mask_bool, ((0, pad_size), (0, pad_size)), constant_values=False)\n return jnp.logical_and(attention_mask, expanded_mask)\n \n original_shape = query.shape\n original_seq_len = query.shape[-3]\n \n # Pad to nearest multiple of 4\n target_seq_len = ((original_seq_len + 3) // 4) * 4\n pad_size = target_seq_len - original_seq_len\n\n query_4d = _pad(_rearrange(query))\n key_4d = _pad(_rearrange(key))\n value_4d = _pad(_rearrange(value))\n \n attention_mask = jnp.ones((target_seq_len, target_seq_len), dtype=jnp.bool_)\n attention_mask = attention_mask.at[original_seq_len:, :].set(False)\n attention_mask = attention_mask.at[:, original_seq_len:].set(False)\n\n mask_4d = _fuse_masks(mask, attention_mask) if mask is not None else attention_mask\n mask_4d = mask_4d[jnp.newaxis, jnp.newaxis, :, :] # (1, 1, seq_len, seq_len)\n \n bias_4d = _pad(_rearrange(bias)) if bias is not None else None\n \n output_4d = jax.nn.dot_product_attention(\n query=query_4d,\n key=key_4d,\n value=value_4d,\n bias=bias_4d,\n mask=mask_4d,\n implementation=implementation,\n is_causal=is_causal,\n **kwargs\n )\n return output_4d[..., :original_seq_len, :, :].reshape(original_shape)\n \n return attention_fn\n\n",python,tab
  910,7640689,"utils/nn.py",3171,0,"",python,selection_command
  911,7642306,"utils/nn.py",3203,0,"",python,selection_command