{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Overview\n", "\n", "In this notebook, we walk through all the necessary components of running experiments on LIBERO, and some common usage such as defining your own algorithm and policy architectures in the codebase.\n", "\n", "1. Dataset preparation for your algorithms\n", "2. Write your own algorithm\n", " - Subclassing from `Sequential` base class\n", "3. Write your own model\n", "4. Write your training loop\n", "5. Visualize results" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 1. Experiments" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/tmp/ipykernel_24770/2563292850.py:18: UserWarning: \n", "The version_base parameter is not specified.\n", "Please specify a compatability version level, or None.\n", "Will assume defaults for version 1.1\n", " initialize(config_path=\"../libero/configs\")\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "{ 'color_aug': { 'network': 'BatchWiseImgColorJitterAug',\n", " 'network_kwargs': { 'brightness': 0.3,\n", " 'contrast': 0.3,\n", " 'epsilon': 0.1,\n", " 'hue': 0.3,\n", " 'input_shape': None,\n", " 'saturation': 0.3}},\n", " 'embed_size': 64,\n", " 'extra_hidden_size': 128,\n", " 'extra_num_layers': 0,\n", " 'image_encoder': { 'network': 'ResnetEncoder',\n", " 'network_kwargs': { 'freeze': False,\n", " 'language_fusion': 'film',\n", " 'no_stride': False,\n", " 'pretrained': False,\n", " 'remove_layer_num': 4}},\n", " 'language_encoder': { 'network': 'MLPEncoder',\n", " 'network_kwargs': { 'hidden_size': 128,\n", " 'input_size': 768,\n", " 'num_layers': 1,\n", " 'output_size': 128}},\n", " 'policy_head': { 'loss_kwargs': {'loss_coef': 1.0},\n", " 'network': 'GMMHead',\n", " 'network_kwargs': { 'activation': 'softplus',\n", " 'hidden_size': 1024,\n", " 'low_eval_noise': False,\n", " 'min_std': 0.0001,\n", " 'num_layers': 2,\n", " 'num_modes': 5}},\n", " 'policy_type': 'BCTransformerPolicy',\n", " 'temporal_position_encoding': { 'network': 'SinusoidalPositionEncoding',\n", " 'network_kwargs': { 'factor_ratio': None,\n", " 'input_size': None,\n", " 'inv_freq_factor': 10}},\n", " 'transformer_dropout': 0.1,\n", " 'transformer_head_output_size': 64,\n", " 'transformer_input_size': None,\n", " 'transformer_max_seq_len': 10,\n", " 'transformer_mlp_hidden_size': 256,\n", " 'transformer_num_heads': 6,\n", " 'transformer_num_layers': 4,\n", " 'translation_aug': { 'network': 'TranslationAug',\n", " 'network_kwargs': { 'input_shape': None,\n", " 'translation': 8}}}\n", "('Note that the number of epochs used in this example is intentionally reduced '\n", " 'to 5.')\n", "[info] using task orders [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n", "\n", "============= Initialized Observation Utils with Obs Spec =============\n", "\n", "using obs modality: rgb with keys: ['eye_in_hand_rgb', 'agentview_rgb']\n", "using obs modality: depth with keys: []\n", "using obs modality: low_dim with keys: ['gripper_states', 'joint_states']\n", "SequenceDataset: loading dataset into memory...\n", "100%|██████████| 50/50 [00:00<00:00, 1126.19it/s]\n", "SequenceDataset: loading dataset into memory...\n", "100%|██████████| 50/50 [00:00<00:00, 1162.72it/s]\n", "SequenceDataset: loading dataset into memory...\n", "100%|██████████| 50/50 [00:00<00:00, 1175.77it/s]\n", "SequenceDataset: loading dataset into memory...\n", "100%|██████████| 50/50 [00:00<00:00, 1206.08it/s]\n", "SequenceDataset: loading dataset into memory...\n", "100%|██████████| 50/50 [00:00<00:00, 1134.37it/s]\n", "SequenceDataset: loading dataset into memory...\n", "100%|██████████| 50/50 [00:00<00:00, 967.75it/s]\n", "SequenceDataset: loading dataset into memory...\n", "100%|██████████| 50/50 [00:00<00:00, 1178.42it/s]\n", "SequenceDataset: loading dataset into memory...\n", "100%|██████████| 50/50 [00:00<00:00, 1222.58it/s]\n", "SequenceDataset: loading dataset into memory...\n", "100%|██████████| 50/50 [00:00<00:00, 1164.09it/s]\n", "SequenceDataset: loading dataset into memory...\n", "100%|██████████| 50/50 [00:00<00:00, 1151.78it/s]\n" ] } ], "source": [ "from hydra import compose, initialize\n", "\n", "from libero.libero import benchmark, get_libero_path\n", "import hydra\n", "import pprint\n", "import os\n", "os.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\n", "os.environ['PYOPENGL_PLATFORM'] = 'egl'\n", "from omegaconf import OmegaConf\n", "import yaml\n", "from easydict import EasyDict\n", "from libero.libero.benchmark import get_benchmark\n", "from libero.lifelong.datasets import (GroupedTaskDataset, SequenceVLDataset, get_dataset)\n", "from libero.lifelong.utils import (get_task_embs, safe_device, create_experiment_dir)\n", "hydra.core.global_hydra.GlobalHydra.instance().clear()\n", "\n", "### load the default hydra config\n", "initialize(config_path=\"../libero/configs\")\n", "hydra_cfg = compose(config_name=\"config\")\n", "yaml_config = OmegaConf.to_yaml(hydra_cfg)\n", "cfg = EasyDict(yaml.safe_load(yaml_config))\n", "\n", "pp = pprint.PrettyPrinter(indent=2)\n", "pp.pprint(cfg.policy)\n", "\n", "# prepare lifelong learning\n", "cfg.folder = get_libero_path(\"datasets\")\n", "cfg.bddl_folder = get_libero_path(\"bddl_files\")\n", "cfg.init_states_folder = get_libero_path(\"init_states\")\n", "cfg.eval.num_procs = 1\n", "cfg.eval.n_eval = 5\n", "\n", "cfg.train.n_epochs = 25\n", "\n", "pp.pprint(f\"Note that the number of epochs used in this example is intentionally reduced to 5.\")\n", "\n", "task_order = cfg.data.task_order_index # can be from {0 .. 21}, default to 0, which is [task 0, 1, 2 ...]\n", "cfg.benchmark_name = \"libero_object\" # can be from {\"libero_spatial\", \"libero_object\", \"libero_goal\", \"libero_10\"}\n", "benchmark = get_benchmark(cfg.benchmark_name)(task_order)\n", "\n", "# prepare datasets from the benchmark\n", "datasets = []\n", "descriptions = []\n", "shape_meta = None\n", "n_tasks = benchmark.n_tasks\n", "\n", "for i in range(n_tasks):\n", " # currently we assume tasks from same benchmark have the same shape_meta\n", " task_i_dataset, shape_meta = get_dataset(\n", " dataset_path=os.path.join(cfg.folder, benchmark.get_task_demonstration(i)),\n", " obs_modality=cfg.data.obs.modality,\n", " initialize_obs_utils=(i==0),\n", " seq_len=cfg.data.seq_len,\n", " )\n", " # add language to the vision dataset, hence we call vl_dataset\n", " descriptions.append(benchmark.get_task(i).language)\n", " datasets.append(task_i_dataset)\n", "\n", "task_embs = get_task_embs(cfg, descriptions)\n", "benchmark.set_task_embs(task_embs)\n", "\n", "datasets = [SequenceVLDataset(ds, emb) for (ds, emb) in zip(datasets, task_embs)]\n", "n_demos = [data.n_demos for data in datasets]\n", "n_sequences = [data.total_num_sequences for data in datasets]" ] }, { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "## 2. Write your own policy architecture" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "import robomimic.utils.tensor_utils as TensorUtils\n", "import torch\n", "import torch.nn as nn\n", "\n", "from einops import rearrange, repeat\n", "from libero.lifelong.models.modules.rgb_modules import *\n", "from libero.lifelong.models.modules.language_modules import *\n", "from libero.lifelong.models.base_policy import BasePolicy\n", "from libero.lifelong.models.policy_head import *\n", "from libero.lifelong.models.modules.transformer_modules import *\n", "\n", "###############################################################################\n", "#\n", "# A model handling extra input modalities besides images at time t.\n", "#\n", "###############################################################################\n", "\n", "class ExtraModalityTokens(nn.Module):\n", " def __init__(\n", " self,\n", " use_joint=False,\n", " use_gripper=False,\n", " use_ee=False,\n", " extra_num_layers=0,\n", " extra_hidden_size=64,\n", " extra_embedding_size=32,\n", " ):\n", " \"\"\"\n", " This is a class that maps all extra modality inputs into tokens of the same size\n", " \"\"\"\n", " super().__init__()\n", " self.use_joint = use_joint\n", " self.use_gripper = use_gripper\n", " self.use_ee = use_ee\n", " self.extra_embedding_size = extra_embedding_size\n", "\n", " joint_states_dim = 7\n", " gripper_states_dim = 2\n", " ee_dim = 3\n", "\n", " self.num_extra = int(use_joint) + int(use_gripper) + int(use_ee)\n", "\n", " extra_low_level_feature_dim = (\n", " int(use_joint) * joint_states_dim\n", " + int(use_gripper) * gripper_states_dim\n", " + int(use_ee) * ee_dim\n", " )\n", "\n", " assert extra_low_level_feature_dim > 0, \"[error] no extra information\"\n", "\n", " self.extra_encoders = {}\n", "\n", " def generate_proprio_mlp_fn(modality_name, extra_low_level_feature_dim):\n", " assert extra_low_level_feature_dim > 0 # we indeed have extra information\n", " if extra_num_layers > 0:\n", " layers = [nn.Linear(extra_low_level_feature_dim, extra_hidden_size)]\n", " for i in range(1, extra_num_layers):\n", " layers += [\n", " nn.Linear(extra_hidden_size, extra_hidden_size),\n", " nn.ReLU(inplace=True),\n", " ]\n", " layers += [nn.Linear(extra_hidden_size, extra_embedding_size)]\n", " else:\n", " layers = [nn.Linear(extra_low_level_feature_dim, extra_embedding_size)]\n", "\n", " self.proprio_mlp = nn.Sequential(*layers)\n", " self.extra_encoders[modality_name] = {\"encoder\": self.proprio_mlp}\n", "\n", " for (proprio_dim, use_modality, modality_name) in [\n", " (joint_states_dim, self.use_joint, \"joint_states\"),\n", " (gripper_states_dim, self.use_gripper, \"gripper_states\"),\n", " (ee_dim, self.use_ee, \"ee_states\"),\n", " ]:\n", "\n", " if use_modality:\n", " generate_proprio_mlp_fn(modality_name, proprio_dim)\n", "\n", " self.encoders = nn.ModuleList(\n", " [x[\"encoder\"] for x in self.extra_encoders.values()]\n", " )\n", "\n", " def forward(self, obs_dict):\n", " \"\"\"\n", " obs_dict: {\n", " (optional) joint_stats: (B, T, 7),\n", " (optional) gripper_states: (B, T, 2),\n", " (optional) ee: (B, T, 3)\n", " }\n", " map above to a latent vector of shape (B, T, H)\n", " \"\"\"\n", " tensor_list = []\n", "\n", " for (use_modality, modality_name) in [\n", " (self.use_joint, \"joint_states\"),\n", " (self.use_gripper, \"gripper_states\"),\n", " (self.use_ee, \"ee_states\"),\n", " ]:\n", "\n", " if use_modality:\n", " tensor_list.append(\n", " self.extra_encoders[modality_name][\"encoder\"](\n", " obs_dict[modality_name]\n", " )\n", " )\n", "\n", " x = torch.stack(tensor_list, dim=-2)\n", " return x\n", "\n", "###############################################################################\n", "#\n", "# A Transformer policy\n", "#\n", "###############################################################################\n", "\n", "\n", "class MyTransformerPolicy(BasePolicy):\n", " \"\"\"\n", " Input: (o_{t-H}, ... , o_t)\n", " Output: a_t or distribution of a_t\n", " \"\"\"\n", "\n", " def __init__(self, cfg, shape_meta):\n", " super().__init__(cfg, shape_meta)\n", " policy_cfg = cfg.policy\n", "\n", " ### 1. encode image\n", " embed_size = policy_cfg.embed_size\n", " transformer_input_sizes = []\n", " self.image_encoders = {}\n", " for name in shape_meta[\"all_shapes\"].keys():\n", " if \"rgb\" in name or \"depth\" in name:\n", " kwargs = policy_cfg.image_encoder.network_kwargs\n", " kwargs.input_shape = shape_meta[\"all_shapes\"][name]\n", " kwargs.output_size = embed_size\n", " kwargs.language_dim = (\n", " policy_cfg.language_encoder.network_kwargs.input_size\n", " )\n", " self.image_encoders[name] = {\n", " \"input_shape\": shape_meta[\"all_shapes\"][name],\n", " \"encoder\": eval(policy_cfg.image_encoder.network)(**kwargs),\n", " }\n", "\n", " self.encoders = nn.ModuleList(\n", " [x[\"encoder\"] for x in self.image_encoders.values()]\n", " )\n", "\n", " ### 2. encode language\n", " policy_cfg.language_encoder.network_kwargs.output_size = embed_size\n", " self.language_encoder = eval(policy_cfg.language_encoder.network)(\n", " **policy_cfg.language_encoder.network_kwargs\n", " )\n", "\n", " ### 3. encode extra information (e.g. gripper, joint_state)\n", " self.extra_encoder = ExtraModalityTokens(\n", " use_joint=cfg.data.use_joint,\n", " use_gripper=cfg.data.use_gripper,\n", " use_ee=cfg.data.use_ee,\n", " extra_num_layers=policy_cfg.extra_num_layers,\n", " extra_hidden_size=policy_cfg.extra_hidden_size,\n", " extra_embedding_size=embed_size,\n", " )\n", "\n", " ### 4. define temporal transformer\n", " policy_cfg.temporal_position_encoding.network_kwargs.input_size = embed_size\n", " self.temporal_position_encoding_fn = eval(\n", " policy_cfg.temporal_position_encoding.network\n", " )(**policy_cfg.temporal_position_encoding.network_kwargs)\n", "\n", " self.temporal_transformer = TransformerDecoder(\n", " input_size=embed_size,\n", " num_layers=policy_cfg.transformer_num_layers,\n", " num_heads=policy_cfg.transformer_num_heads,\n", " head_output_size=policy_cfg.transformer_head_output_size,\n", " mlp_hidden_size=policy_cfg.transformer_mlp_hidden_size,\n", " dropout=policy_cfg.transformer_dropout,\n", " )\n", "\n", " policy_head_kwargs = policy_cfg.policy_head.network_kwargs\n", " policy_head_kwargs.input_size = embed_size\n", " policy_head_kwargs.output_size = shape_meta[\"ac_dim\"]\n", "\n", " self.policy_head = eval(policy_cfg.policy_head.network)(\n", " **policy_cfg.policy_head.loss_kwargs,\n", " **policy_cfg.policy_head.network_kwargs\n", " )\n", "\n", " self.latent_queue = []\n", " self.max_seq_len = policy_cfg.transformer_max_seq_len\n", "\n", " def temporal_encode(self, x):\n", " pos_emb = self.temporal_position_encoding_fn(x)\n", " x = x + pos_emb.unsqueeze(1) # (B, T, num_modality, E)\n", " sh = x.shape\n", " self.temporal_transformer.compute_mask(x.shape)\n", "\n", " x = TensorUtils.join_dimensions(x, 1, 2) # (B, T*num_modality, E)\n", " x = self.temporal_transformer(x)\n", " x = x.reshape(*sh)\n", " return x[:, :, 0] # (B, T, E)\n", "\n", " def spatial_encode(self, data):\n", " # 1. encode extra\n", " extra = self.extra_encoder(data[\"obs\"]) # (B, T, num_extra, E)\n", "\n", " # 2. encode language, treat it as action token\n", " B, T = extra.shape[:2]\n", " text_encoded = self.language_encoder(data) # (B, E)\n", " text_encoded = text_encoded.view(B, 1, 1, -1).expand(\n", " -1, T, -1, -1\n", " ) # (B, T, 1, E)\n", " encoded = [text_encoded, extra]\n", "\n", " # 3. encode image\n", " for img_name in self.image_encoders.keys():\n", " x = data[\"obs\"][img_name]\n", " B, T, C, H, W = x.shape\n", " img_encoded = self.image_encoders[img_name][\"encoder\"](\n", " x.reshape(B * T, C, H, W),\n", " langs=data[\"task_emb\"]\n", " .reshape(B, 1, -1)\n", " .repeat(1, T, 1)\n", " .reshape(B * T, -1),\n", " ).view(B, T, 1, -1)\n", " encoded.append(img_encoded)\n", " encoded = torch.cat(encoded, -2) # (B, T, num_modalities, E)\n", " return encoded\n", "\n", " def forward(self, data):\n", " x = self.spatial_encode(data)\n", " x = self.temporal_encode(x)\n", " dist = self.policy_head(x)\n", " return dist\n", "\n", " def get_action(self, data):\n", " self.eval()\n", " with torch.no_grad():\n", " data = self.preprocess_input(data, train_mode=False)\n", " x = self.spatial_encode(data)\n", " self.latent_queue.append(x)\n", " if len(self.latent_queue) > self.max_seq_len:\n", " self.latent_queue.pop(0)\n", " x = torch.cat(self.latent_queue, dim=1) # (B, T, H_all)\n", " x = self.temporal_encode(x)\n", " dist = self.policy_head(x[:, -1])\n", " action = dist.sample().detach().cpu()\n", " return action.view(action.shape[0], -1).numpy()\n", "\n", " def reset(self):\n", " self.latent_queue = []" ] }, { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "## 3. Write your own lifelong learning algorithm" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "\u001b[1m\u001b[33m[robosuite WARNING] \u001b[0mNo private macro file found! (__init__.py:7)\n", "\u001b[1m\u001b[33m[robosuite WARNING] \u001b[0mIt is recommended to use a private macro file (__init__.py:8)\n", "\u001b[1m\u001b[33m[robosuite WARNING] \u001b[0mTo setup, run: python /home/yifengz/workspace/robosuite-master/robosuite/scripts/setup_macros.py (__init__.py:9)\n" ] } ], "source": [ "from libero.lifelong.algos.base import Sequential\n", "\n", "### All lifelong learning algorithm should inherit the Sequential algorithm super class\n", "\n", "class MyLifelongAlgo(Sequential):\n", " \"\"\"\n", " The experience replay policy.\n", " \"\"\"\n", " def __init__(self,\n", " n_tasks,\n", " cfg,\n", " **policy_kwargs):\n", " super().__init__(n_tasks=n_tasks, cfg=cfg, **policy_kwargs)\n", " # define the learning policy\n", " self.datasets = []\n", " self.policy = eval(cfg.policy.policy_type)(cfg, cfg.shape_meta)\n", "\n", " def start_task(self, task):\n", " # what to do at the beginning of a new task\n", " super().start_task(task)\n", "\n", " def end_task(self, dataset, task_id, benchmark):\n", " # what to do when finish learning a new task\n", " self.datasets.append(dataset)\n", "\n", " def observe(self, data):\n", " # how the algorithm observes a data and returns a loss to be optimized\n", " loss = super().observe(data)\n", " return loss" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# 4. Write your training script" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "experiment directory is: ./experiments/libero_object/MyLifelongAlgo/MyTransformerPolicy_seed10000/run_006\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/home/yifengz/.pyenv/versions/3.8.5/envs/new-continual-learning/lib/python3.8/site-packages/torchvision/models/_utils.py:208: UserWarning: The parameter 'pretrained' is deprecated since 0.13 and will be removed in 0.15, please use 'weights' instead.\n", " warnings.warn(\n", "/home/yifengz/.pyenv/versions/3.8.5/envs/new-continual-learning/lib/python3.8/site-packages/torchvision/models/_utils.py:223: UserWarning: Arguments other than a weight enum or `None` for 'weights' are deprecated since 0.13 and will be removed in 0.15. The current behavior is equivalent to passing `weights=None`.\n", " warnings.warn(msg)\n", "/home/yifengz/.pyenv/versions/3.8.5/envs/new-continual-learning/lib/python3.8/site-packages/torch/functional.py:478: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2894.)\n", " return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "NOTE: the number of epochs used in this example is intentionally reduced to 30 for simplicity.\n", "NOTE: the number of evaluation episodes used in this example is intentionally reduced to 5 for simplicity.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ " 0%| | 0/10 [00:00" ], "text/plain": [ "" ] }, "execution_count": 27, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from IPython.display import HTML\n", "from base64 import b64encode\n", "import imageio\n", "\n", "from libero.libero.envs import OffScreenRenderEnv, DummyVectorEnv\n", "from libero.lifelong.metric import raw_obs_to_tensor_obs\n", "\n", "# You can turn on subprocess\n", "env_num = 1\n", "action_dim = 7\n", "\n", "\n", "# If it's packnet, the weights need to be processed first\n", "task_id = 9\n", "task = benchmark.get_task(task_id)\n", "task_emb = benchmark.get_task_emb(task_id)\n", "\n", "if cfg.lifelong.algo == \"PackNet\":\n", " algo = algo.get_eval_algo(task_id)\n", "\n", "algo.eval()\n", "env_args = {\n", " \"bddl_file_name\": os.path.join(\n", " cfg.bddl_folder, task.problem_folder, task.bddl_file\n", " ),\n", " \"camera_heights\": cfg.data.img_h,\n", " \"camera_widths\": cfg.data.img_w,\n", "}\n", "\n", "env = DummyVectorEnv(\n", " [lambda: OffScreenRenderEnv(**env_args) for _ in range(env_num)]\n", ")\n", "\n", "init_states_path = os.path.join(\n", " cfg.init_states_folder, task.problem_folder, task.init_states_file\n", ")\n", "init_states = torch.load(init_states_path)\n", "\n", "env.reset()\n", "\n", "init_state = init_states[0:1]\n", "dones = [False]\n", "\n", "algo.reset()\n", "\n", "obs = env.set_init_state(init_state)\n", "\n", "\n", "# Make sure the gripepr is open to make it consistent with the provided demos.\n", "dummy_actions = np.zeros((env_num, action_dim))\n", "for _ in range(5):\n", " obs, _, _, _ = env.step(dummy_actions)\n", "\n", "steps = 0\n", "\n", "obs_tensors = [[]] * env_num\n", "while steps < cfg.eval.max_steps:\n", " steps += 1\n", " data = raw_obs_to_tensor_obs(obs, task_emb, cfg)\n", " action = algo.policy.get_action(data)\n", "\n", " obs, reward, done, info = env.step(action)\n", "\n", " for k in range(env_num):\n", " dones[k] = dones[k] or done[k]\n", " obs_tensors[k].append(obs[k][\"agentview_image\"])\n", " if all(dones):\n", " break\n", "\n", "# visualize video\n", "# obs_tensor: (env_num, T, H, W, C)\n", "\n", "images = [img[::-1] for img in obs_tensors[0]]\n", "fps = 30\n", "writer = imageio.get_writer('tmp_video.mp4', fps=fps)\n", "for image in images:\n", " writer.append_data(image)\n", "writer.close()\n", "\n", "video_data = open(\"tmp_video.mp4\", \"rb\").read()\n", "video_tag = f'