Blazewild committed
Commit 79294b0 · verified · 1 Parent(s): 8360eb0

Upload folder using huggingface_hub

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. .gitattributes +1 -0
  2. CoCap/.gitignore +163 -0
  3. CoCap/LICENSE +21 -0
  4. CoCap/README.md +73 -0
  5. CoCap/assets/framework.svg +0 -0
  6. CoCap/cocap/__init__.py +5 -0
  7. CoCap/cocap/data/__init__.py +5 -0
  8. CoCap/cocap/data/datasets/__init__.py +5 -0
  9. CoCap/cocap/data/datasets/avcaps.py +195 -0
  10. CoCap/cocap/data/datasets/compressed_video/__init__.py +5 -0
  11. CoCap/cocap/data/datasets/compressed_video/compressed_video_utils.py +40 -0
  12. CoCap/cocap/data/datasets/compressed_video/dataset_msrvtt.py +127 -0
  13. CoCap/cocap/data/datasets/compressed_video/dataset_msvd.py +118 -0
  14. CoCap/cocap/data/datasets/compressed_video/dataset_vatex.py +127 -0
  15. CoCap/cocap/data/datasets/compressed_video/transforms.py +184 -0
  16. CoCap/cocap/data/datasets/compressed_video/video_readers.py +347 -0
  17. CoCap/cocap/data/datasets/compressed_video/video_text_base.py +120 -0
  18. CoCap/cocap/modeling/__init__.py +5 -0
  19. CoCap/cocap/modeling/av_captioner.py +219 -0
  20. CoCap/cocap/modeling/eval_captioning.py +104 -0
  21. CoCap/cocap/modeling/lm_cocap.py +242 -0
  22. CoCap/cocap/modeling/loss.py +62 -0
  23. CoCap/cocap/modeling/optimization.py +185 -0
  24. CoCap/cocap/modules/README.md +2 -0
  25. CoCap/cocap/modules/__init__.py +5 -0
  26. CoCap/cocap/modules/audio_encoder.py +194 -0
  27. CoCap/cocap/modules/beats/BEATs.py +183 -0
  28. CoCap/cocap/modules/beats/__init__.py +1 -0
  29. CoCap/cocap/modules/beats/backbone.py +783 -0
  30. CoCap/cocap/modules/beats/modules.py +219 -0
  31. CoCap/cocap/modules/bert.py +403 -0
  32. CoCap/cocap/modules/clip/__init__.py +1 -0
  33. CoCap/cocap/modules/clip/bpe_simple_vocab_16e6.txt.gz +3 -0
  34. CoCap/cocap/modules/clip/clip.py +250 -0
  35. CoCap/cocap/modules/clip/model.py +500 -0
  36. CoCap/cocap/modules/clip/simple_tokenizer.py +134 -0
  37. CoCap/cocap/modules/compressed_video/__init__.py +8 -0
  38. CoCap/cocap/modules/compressed_video/compressed_video_captioner.py +228 -0
  39. CoCap/cocap/modules/compressed_video/compressed_video_transformer.py +317 -0
  40. CoCap/cocap/utils/__init__.py +5 -0
  41. CoCap/cocap/utils/checkpoint.py +132 -0
  42. CoCap/cocap/utils/image.py +24 -0
  43. CoCap/cocap/utils/json.py +28 -0
  44. CoCap/cocap/utils/logging.py +90 -0
  45. CoCap/cocap/utils/profile.py +102 -0
  46. CoCap/cocap/utils/registry.py +52 -0
  47. CoCap/cocap/utils/train_utils.py +148 -0
  48. CoCap/cocap/utils/video.py +98 -0
  49. CoCap/cocap/utils/visualize.py +41 -0
  50. CoCap/cocap/utils/writer.py +30 -0
.gitattributes CHANGED
@@ -57,3 +57,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
60
+ CoCap/poster.pdf filter=lfs diff=lfs merge=lfs -text
CoCap/.gitignore ADDED
@@ -0,0 +1,163 @@
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ share/python-wheels/
24
+ *.egg-info/
25
+ .installed.cfg
26
+ *.egg
27
+ MANIFEST
28
+
29
+ # PyInstaller
30
+ # Usually these files are written by a python script from a template
31
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
+ *.manifest
33
+ *.spec
34
+
35
+ # Installer logs
36
+ pip-log.txt
37
+ pip-delete-this-directory.txt
38
+
39
+ # Unit test / coverage reports
40
+ htmlcov/
41
+ .tox/
42
+ .nox/
43
+ .coverage
44
+ .coverage.*
45
+ .cache
46
+ nosetests.xml
47
+ coverage.xml
48
+ *.cover
49
+ *.py,cover
50
+ .hypothesis/
51
+ .pytest_cache/
52
+ cover/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ .pybuilder/
76
+ target/
77
+
78
+ # Jupyter Notebook
79
+ .ipynb_checkpoints
80
+
81
+ # IPython
82
+ profile_default/
83
+ ipython_config.py
84
+
85
+ # pyenv
86
+ # For a library or package, you might want to ignore these files since the code is
87
+ # intended to run in multiple environments; otherwise, check them in:
88
+ # .python-version
89
+
90
+ # pipenv
91
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
93
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
94
+ # install all needed dependencies.
95
+ #Pipfile.lock
96
+
97
+ # poetry
98
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
100
+ # commonly ignored for libraries.
101
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102
+ #poetry.lock
103
+
104
+ # pdm
105
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106
+ #pdm.lock
107
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108
+ # in version control.
109
+ # https://pdm.fming.dev/#use-with-ide
110
+ .pdm.toml
111
+
112
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
113
+ __pypackages__/
114
+
115
+ # Celery stuff
116
+ celerybeat-schedule
117
+ celerybeat.pid
118
+
119
+ # SageMath parsed files
120
+ *.sage.py
121
+
122
+ # Environments
123
+ .env
124
+ .venv
125
+ env/
126
+ venv/
127
+ ENV/
128
+ env.bak/
129
+ venv.bak/
130
+
131
+ # Spyder project settings
132
+ .spyderproject
133
+ .spyproject
134
+
135
+ # Rope project settings
136
+ .ropeproject
137
+
138
+ # mkdocs documentation
139
+ /site
140
+
141
+ # mypy
142
+ .mypy_cache/
143
+ .dmypy.json
144
+ dmypy.json
145
+
146
+ # Pyre type checker
147
+ .pyre/
148
+
149
+ # pytype static type analyzer
150
+ .pytype/
151
+
152
+ # Cython debug symbols
153
+ cython_debug/
154
+
155
+ # PyCharm
156
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
157
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
158
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
159
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
160
+ #.idea/
161
+
162
+ .idea
163
+ .DS_Store
CoCap/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2023 Yaojie Shen
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
CoCap/README.md ADDED
@@ -0,0 +1,73 @@
1
+ # Accurate and Fast Compressed Video Captioning
2
+
3
+ [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/accurate-and-fast-compressed-video-captioning/video-captioning-on-msr-vtt-1)](https://paperswithcode.com/sota/video-captioning-on-msr-vtt-1?p=accurate-and-fast-compressed-video-captioning)
4
+ [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/accurate-and-fast-compressed-video-captioning/video-captioning-on-msvd-1)](https://paperswithcode.com/sota/video-captioning-on-msvd-1?p=accurate-and-fast-compressed-video-captioning)
5
+ [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/accurate-and-fast-compressed-video-captioning/video-captioning-on-vatex-1)](https://paperswithcode.com/sota/video-captioning-on-vatex-1?p=accurate-and-fast-compressed-video-captioning)
6
+
7
+
8
+ ✨ This is the official implementation of the ICCV 2023 paper *[Accurate and Fast Compressed Video Captioning](https://arxiv.org/abs/2309.12867)*.
9
+
10
+ 🚀 This code is a revised version of the original release, incorporating Hydra and PyTorch Lightning. For the original implementation, please refer to [this commit](https://github.com/Yaojie-Shen/CoCap/tree/initial_release).
11
+
12
+ ## Introduction
13
+
14
+ In this work, we propose an end-to-end video captioning method based on compressed-domain information from encoded H.264 videos. Our approach aims to generate accurate captions for compressed videos in a fast and efficient manner.
15
+
16
+ ![Framework](./assets/framework.svg)
17
+
18
+ By releasing this code, we hope to facilitate further research and development in the field of compressed video processing. If you find this work useful in your own research, please consider citing our paper as a reference.
19
+
20
+ ## Preparation
21
+
22
+ ### 1. Install the Requirements
23
+
24
+ To run the code, please install the dependency libraries using the following commands:
25
+
26
+ ```shell
27
+ sudo apt update && sudo apt install default-jre -y # required by pycocoevalcap
28
+ pip3 install -e . # See `requirements.txt` for exact versions used in development
29
+ ```
30
+
31
+ Additionally, you will need to install the compressed video reader as described in the README.md of [Compressed-Video-Reader](https://github.com/yaojie-shen/Compressed-Video-Reader).
32
+
33
+
34
+ ### 2. Prepare the Pretrained Models
35
+
36
+ Our model is built on pretrained CLIP. You can run the following script to download the weights before training to avoid network issues:
37
+
38
+ ```bash
39
+ sudo apt update && sudo apt install aria2 -y # install aria2
40
+ bash model_zoo/download_model.sh
41
+ ```
42
+
43
+ This will download the CLIP model to `model_zoo/clip_model`. Note that this directory is hard-coded in our code.
44
+
45
+ ### 3. Prepare the Data
46
+
47
+ We have conducted experiments on three video caption datasets: MSRVTT, MSVD, and VATEX. The datasets are stored in the `dataset` folder under the project root. For detailed instructions on downloading and preparing the training data, please refer to [dataset/README.md](./dataset/README.md).
48
+
49
+ ## Training & Evaluation
50
+
51
+ The training is configured using YAML, and all the configurations are listed in [`configs/compressed_video`](./configs/compressed_video). You can use the following commands to run the experiments:
52
+
53
+ ```shell
54
+ # msrvtt
55
+ python3 tools/train_net.py --config-name=exp/train/msrvtt_captioning
56
+ # msvd
57
+ python3 tools/train_net.py --config-name=exp/train/msvd_captioning
58
+ # vatex
59
+ python3 tools/train_net.py --config-name=exp/train/vatex_captioning
60
+ ```
61
+
62
+ By default, the logs and results are saved to `./logs/<experiment_name>/`. The loss and metrics are visualized with TensorBoard.
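A minimal sketch for inspecting the training curves, assuming the default `./logs` output directory mentioned above:

```shell
tensorboard --logdir ./logs --port 6006
```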
63
+
64
+ ## Citation
65
+
66
+ ```text
67
+ @inproceedings{cocap,
68
+ title={Accurate and Fast Compressed Video Captioning},
69
+ author={Yaojie Shen and Xin Gu and Kai Xu and Heng Fan and Longyin Wen and Libo Zhang},
70
+ booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision},
71
+ year={2023}
72
+ }
73
+ ```
CoCap/assets/framework.svg ADDED
CoCap/cocap/__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 2022/11/15 00:45
3
+ # @Author : Yaojie Shen
4
+ # @Project : CoCap
5
+ # @File : __init__.py
CoCap/cocap/data/__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 2022/11/11 16:31
3
+ # @Author : Yaojie Shen
4
+ # @Project : CoCap
5
+ # @File : __init__.py
CoCap/cocap/data/datasets/__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 2022/11/13 02:38
3
+ # @Author : Yaojie Shen
4
+ # @Project : CoCap
5
+ # @File : __init__.py
CoCap/cocap/data/datasets/avcaps.py ADDED
@@ -0,0 +1,195 @@
1
+ import os
2
+ import json
3
+ import random
4
+ import torch
5
+ import torchaudio
6
+ import torchvision.transforms as T
7
+ import torch.nn.functional as F
8
+ from torch.utils.data import Dataset
9
+ from PIL import Image
10
+ from torch.nn.utils.rnn import pad_sequence
11
+ from cocap.modules.clip.simple_tokenizer import SimpleTokenizer
12
+ import logging
13
+ try:
14
+ from torchvision.io import read_video
15
+ except ImportError:
16
+ read_video = None
17
+
18
+ logger = logging.getLogger(__name__)
19
+
20
+ class AVCapsDataset(Dataset):
21
+ def __init__(self, root_dir, split="train", tokenizer=None, max_audio_len=1024, max_cap_len=77):
22
+ self.root_dir = root_dir
23
+ self.split = split
24
+ self.tokenizer = tokenizer if tokenizer else SimpleTokenizer()
25
+ self.max_audio_len = max_audio_len
26
+ self.max_cap_len = max_cap_len
27
+
28
+ # Load JSON
29
+ json_path = os.path.join(root_dir, split, f"{split}_captions.json")
30
+ with open(json_path, 'r') as f:
31
+ self.data = json.load(f)
32
+
33
+ self.video_ids = list(self.data.keys())
34
+ self.video_folder = os.path.join(root_dir, split, "video_240p_h264") # Assuming this is where mp4s are
35
+ # If videos are not here, user might need to adjust.
36
+
37
+ # Transforms
38
+ self.transform = T.Compose([
39
+ T.Resize((224, 224)),
40
+ T.ToTensor(),
41
+ T.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))
42
+ ])
43
+
44
+ # Mel Spectrogram transform for BEATs (16k, 128 bins)
45
+ # Matches ta_kaldi.fbank(num_mel_bins=128, sample_frequency=16000, frame_length=25, frame_shift=10)
46
+ # frame_length=25ms -> 400 samples, frame_shift=10ms -> 160 samples
47
+ self.mel_transform = torchaudio.transforms.MelSpectrogram(
48
+ sample_rate=16000,
49
+ n_mels=128,
50
+ n_fft=400,
51
+ hop_length=160
52
+ )
53
+
54
+ def __len__(self):
55
+ return len(self.video_ids)
56
+
57
+ def __getitem__(self, idx):
58
+ vid = self.video_ids[idx]
59
+ item_data = self.data[vid]
60
+
61
+ # Get Caption
62
+ # "Target the audio_visual_captions key"
63
+ if "audio_visual_captions" in item_data and len(item_data["audio_visual_captions"]) > 0:
64
+ caption = random.choice(item_data["audio_visual_captions"])
65
+ else:
66
+ caption = "" # Should not happen typically
67
+
68
+ # Tokenize Caption
69
+ tokens = self.tokenizer.encode(caption)
70
+ tokens = [49406] + tokens + [49407]  # prepend SOT and append EOT
71
+ # 49406/49407 are CLIP's <|startoftext|>/<|endoftext|> token IDs.
72
+ # Truncate
73
+ if len(tokens) > self.max_cap_len:
74
+ tokens = tokens[:self.max_cap_len]
75
+ tokens[-1] = 49407
76
+
77
+ tokens = torch.tensor(tokens, dtype=torch.long)
78
+
79
+ # Load Video/Audio
80
+ video_path = os.path.join(self.video_folder, f"{vid}.mp4")
81
+
82
+ pixel_values = torch.zeros(3, 224, 224)
83
+ audio_spec = torch.zeros(128, self.max_audio_len)  # placeholder; 128 mel bins to match self.mel_transform
84
+
85
+ try:
86
+ # Load Video
87
+ if os.path.exists(video_path):
88
+ # Only read needed parts to save IO.
89
+ # read_video returns (video, audio, info)
90
+ # video: (T, H, W, C), audio: (K, T_a)
91
+ vframes, aframes, info = read_video(video_path, pts_unit='sec', output_format="TCHW")
92
+
93
+ # 1. Image Processing (Sample random frame)
94
+ if vframes.shape[0] > 0:
95
+ # Random sample
96
+ frame_idx = random.randint(0, len(vframes)-1)
97
+ frame = vframes[frame_idx]  # (C, H, W) when output_format="TCHW"
98
+ # NOTE: output_format="TCHW" is only available in newer torchvision releases;
99
+ # older versions return frames as (T, H, W, C). This first read is not used
100
+ # directly -- the fallback read below is the one that produces `pixel_values`.
101
+ pass
102
+ else:
103
+ # Video file not found; keep the zero placeholder tensors defined above.
104
+ pass
105
+
106
+ # Safe fallback reading
107
+ if os.path.exists(video_path):
108
+ vframes, aframes, info = read_video(video_path, pts_unit='sec')
109
+ # vframes: (T, H, W, C)
110
+
111
+ if vframes.shape[0] > 0:
112
+ idx = random.randint(0, len(vframes) - 1)
113
+ frame = vframes[idx] # (H, W, C)
114
+ frame = frame.permute(2, 0, 1) # (C, H, W)
115
+ pixel_values = self.transform(T.ToPILImage()(frame))
116
+
117
+ if aframes.shape[0] > 0:
118
+ # Mix down to mono
119
+ waveform = aframes.mean(0, keepdim=True) # (1, T_audio)
120
+ # Resample to 16k for BEATs
121
+ if info['audio_fps'] != 16000:
122
+ resampler = torchaudio.transforms.Resample(orig_freq=info['audio_fps'], new_freq=16000)
123
+ waveform = resampler(waveform)
124
+
125
+ audio_spec = self.mel_transform(waveform) # (1, F, T)
126
+ audio_spec = audio_spec.squeeze(0) # (F, T)
127
+
128
+ # Normalize for BEATs
129
+ # fbank = (fbank - fbank_mean) / (2 * fbank_std)
130
+ # fbank_mean=15.41663, fbank_std=6.55582
131
+ audio_spec = (audio_spec - 15.41663) / (2 * 6.55582)
132
+
133
+ # Trim or Pad audio
134
+ if audio_spec.shape[1] > self.max_audio_len:
135
+ audio_spec = audio_spec[:, :self.max_audio_len]
136
+ elif audio_spec.shape[1] < self.max_audio_len:
137
+ pad_amount = self.max_audio_len - audio_spec.shape[1]
138
+ audio_spec = F.pad(audio_spec, (0, pad_amount))
139
+
140
+ except Exception as e:
141
+ logger.warning(f"Error loading {vid}: {e}")
142
+ return None
143
+
144
+ # Return tensors.
145
+ # If the video file was missing, the zero placeholder tensors defined above
146
+ # are returned unchanged.
147
+
148
+ return {
149
+ "video_id": vid,
150
+ "pixel_values": pixel_values,
151
+ "audio_spec": audio_spec, # (F, T)
152
+ "caption": tokens
153
+ }
154
+
155
+ def avcaps_collate_fn(batch):
156
+ # Filter failed loads
157
+ batch = [b for b in batch if b is not None]
158
+ if len(batch) == 0:
159
+ return None
160
+
161
+ # Pad Captions
162
+ captions = [b["caption"] for b in batch]
163
+ captions_padded = pad_sequence(captions, batch_first=True, padding_value=0) # 0 is PAD usually
164
+
165
+ # Create Attention Mask (1 for valid, 0 for pad)
166
+ caption_lengths = [len(c) for c in captions]
167
+ caption_mask = torch.zeros_like(captions_padded, dtype=torch.float)
168
+ for i, length in enumerate(caption_lengths):
169
+ caption_mask[i, :length] = 1.0
170
+
171
+ # Stack Images
172
+ pixel_values = torch.stack([b["pixel_values"] for b in batch])
173
+
174
+ # Pad Audio
175
+ # Audio specs are (F, T). T is variable.
176
+ # Pad the time dimension to the longest spectrogram in the batch so that
177
+ # variable-length audio (and the captions above) can be batched together.
178
+ audios = [b["audio_spec"] for b in batch]
179
+ # Transpose to (T, F) for pad_sequence
180
+ audios_T = [a.transpose(0, 1) for a in audios]
181
+ audios_padded = pad_sequence(audios_T, batch_first=True, padding_value=0)
182
+ # audios_padded is (B, T, n_mels) after padding. Encoders that expect a
183
+ # channel dimension (e.g. (B, 1, T, F)) can unsqueeze it downstream, so the
184
+ # (B, T, F) layout is kept here rather than reshaping inside the collate
185
+ # function.
186
+
187
+ video_ids = [b["video_id"] for b in batch]
188
+
189
+ return {
190
+ "pixel_values": pixel_values,
191
+ "audio_spec": audios_padded, # (B, T, 64)
192
+ "captions": captions_padded,
193
+ "caption_mask": caption_mask,
194
+ "video_ids": video_ids
195
+ }
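A minimal usage sketch for the dataset and collate function above; the `root_dir` value is hypothetical and must contain `<split>/<split>_captions.json` together with the video folder:

```python
from torch.utils.data import DataLoader

from cocap.data.datasets.avcaps import AVCapsDataset, avcaps_collate_fn

# root_dir is a placeholder path; adjust it to where the AVCaps data actually lives.
dataset = AVCapsDataset(root_dir="dataset/avcaps", split="train")
loader = DataLoader(dataset, batch_size=8, shuffle=True, num_workers=4,
                    collate_fn=avcaps_collate_fn)

for batch in loader:
    if batch is None:  # every sample in this batch failed to load
        continue
    print(batch["pixel_values"].shape)  # (B, 3, 224, 224)
    print(batch["audio_spec"].shape)    # (B, T, n_mels), time-padded per batch
    print(batch["captions"].shape)      # (B, longest caption in batch)
    break
```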
CoCap/cocap/data/datasets/compressed_video/__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 7/15/23
3
+ # @Author : Yaojie Shen
4
+ # @Project : CoCap
5
+ # @File : __init__.py
CoCap/cocap/data/datasets/compressed_video/compressed_video_utils.py ADDED
@@ -0,0 +1,40 @@
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 2022/12/9 20:42
3
+ # @Author : Yaojie Shen
4
+ # @Project : CoCap
5
+ # @File : compressed_video_utils.py
6
+
7
+ import numpy as np
8
+
9
+ from cocap.utils.image import byte_imread, byte_imwrite
10
+
11
+
12
+ def serialize(data_to_serialize, quality):
13
+ """
14
+ Compress rgb and residual by converting to JPEG format
15
+ :param data_to_serialize: python dict
16
+ :param quality: quality parameter for JPEG format
17
+ :return:
18
+ """
19
+ data = {}
20
+ for k, v in data_to_serialize.items():
21
+ if k in ["rgb_full", "rgb_gop", "residual"]:
22
+ data[k] = [byte_imwrite(img, quality=quality) for img in v]
23
+ else:
24
+ data[k] = v
25
+ return data
26
+
27
+
28
+ def deserialize(serialized_data):
29
+ """
30
+ Reverse version of serialize()
31
+ :param serialized_data:
32
+ :return:
33
+ """
34
+ data = {}
35
+ for k, v in serialized_data.items():
36
+ if k in ["rgb_full", "rgb_gop", "residual"]:
37
+ data[k] = [np.array(byte_imread(img)) for img in v]
38
+ else:
39
+ data[k] = v
40
+ return data
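A round-trip sketch of `serialize()`/`deserialize()`; the arrays below are synthetic, and since the frames are re-encoded as JPEG the restored arrays are only approximately equal to the originals:

```python
import numpy as np

from cocap.data.datasets.compressed_video.compressed_video_utils import serialize, deserialize

sample = {
    "pict_type": ["I", "P", "P"],
    "rgb_gop": [np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)],
    "residual": [np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8) for _ in range(2)],
}

packed = serialize(sample, quality=90)   # image arrays -> JPEG byte strings
restored = deserialize(packed)           # JPEG byte strings -> arrays again

assert restored["pict_type"] == sample["pict_type"]   # non-image keys pass through
print(restored["rgb_gop"][0].shape)                   # (224, 224, 3)
```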
CoCap/cocap/data/datasets/compressed_video/dataset_msrvtt.py ADDED
@@ -0,0 +1,127 @@
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 2022/11/17 16:54
3
+ # @Author : Yaojie Shen
4
+ # @Project : CoCap
5
+ # @File : dataset_msrvtt.py
6
+
7
+ import os
8
+ import random
9
+ from collections import defaultdict
10
+ from typing import Literal
11
+
12
+ import torch
13
+ from torch.utils import data
14
+ from torchvision import transforms
15
+
16
+ from cocap.modules.clip import clip
17
+ from cocap.utils.json import load_json
18
+ from .transforms import (DictNormalize, DictCenterCrop, DictRandomHorizontalFlip)
19
+ from .video_readers import VIDEO_READER_REGISTRY
20
+ from .video_text_base import get_video, CVConfig
21
+
22
+
23
+ class MSRVTTCaptioningDataset(data.Dataset):
24
+
25
+ def __init__(
26
+ self,
27
+ video_root: str,
28
+ max_words: int,
29
+ max_frames: int,
30
+ unfold_sentences: bool,
31
+ video_size: tuple[int, int],
32
+ metadata: str,
33
+ video_reader: str,
34
+ cv_config: CVConfig,
35
+ split: Literal["train", "test"],
36
+ ):
37
+ self.split = split
38
+ self.video_root = video_root
39
+ self.max_words = max_words
40
+ self.max_frames = max_frames
41
+ self.unfold_sentences = unfold_sentences # only affect the train split
42
+ self.height, self.width = video_size
43
+ self.sentences = [] # (vid, [sentence, ...])
44
+ self.h265_cfg = cv_config
45
+ metadata = load_json(metadata)
46
+ video_ids = [metadata['videos'][idx]['video_id'] for idx in range(len(metadata['videos']))]
47
+ all_split_video_ids = {"train": video_ids[:6513], "val": video_ids[6513:6513 + 497],
48
+ "test": video_ids[6513 + 497:]}
49
+
50
+ split_video_ids = all_split_video_ids[split].copy()
51
+ if self.unfold_sentences:
52
+ for item in metadata["sentences"]:
53
+ if item["video_id"] in split_video_ids:
54
+ self.sentences.append([item["video_id"], [item["caption"]]])
55
+ if split == "test":
56
+ split_video_ids.remove(item["video_id"])
57
+ else:
58
+ vid2sentence = defaultdict(list)
59
+ for item in metadata["sentences"]:
60
+ if item["video_id"] in split_video_ids:
61
+ vid2sentence[item["video_id"]].append(item["caption"])
62
+ self.sentences = list(vid2sentence.items())
63
+
64
+ # self.sentences = self.sentences[:50000]
65
+ self.video_reader = VIDEO_READER_REGISTRY.get(video_reader)
66
+ # transforms
67
+ normalize = DictNormalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
68
+ if split == "train":
69
+ self.transform = transforms.Compose([
70
+ DictCenterCrop((self.height, self.width)),
71
+ DictRandomHorizontalFlip(),
72
+ normalize
73
+ ])
74
+ elif split == "test":
75
+ self.transform = transforms.Compose([
76
+ DictCenterCrop((self.height, self.width)),
77
+ normalize
78
+ ])
79
+ else:
80
+ raise NotImplementedError
81
+
82
+ if split == "test":
83
+ json_ref = {k: [] for k in all_split_video_ids[split]}
84
+ for sentence in metadata["sentences"]:
85
+ if sentence["video_id"] in json_ref:
86
+ json_ref[sentence["video_id"]].append(sentence["caption"])
87
+ # verify
88
+ assert all(len(v) == 20 for _, v in json_ref.items())
89
+ self.json_ref = {k[len("video"):]: v for k, v in json_ref.items()}
90
+
91
+ def __len__(self):
92
+ return len(self.sentences)
93
+
94
+ def _get_video_path(self, video_id):
95
+ return os.path.join(self.video_root, f"{video_id}.mp4")
96
+
97
+ def _get_video(self, video_id):
98
+ video, video_mask = get_video(video_reader=self.video_reader,
99
+ video_path=self._get_video_path(video_id),
100
+ max_frames=self.max_frames,
101
+ sample="rand" if self.split == "train" else "uniform",
102
+ hevc_config=self.h265_cfg)
103
+ if self.transform is not None:
104
+ video = self.transform(video)
105
+ return video, video_mask
106
+
107
+ def __getitem__(self, idx):
108
+ video_id, sentence_list = self.sentences[idx]
109
+ sentence = random.choice(sentence_list)
110
+
111
+ input_ids = clip.tokenize(sentence, context_length=self.max_words, truncate=True)[0]
112
+ input_mask = torch.zeros(self.max_words, dtype=torch.long)
113
+ input_mask[:len(clip._tokenizer.encode(sentence)) + 2] = 1
114
+
115
+ video, video_mask = self._get_video(video_id)
116
+ input_labels = torch.cat((input_ids[1:], torch.IntTensor([0])))
117
+ return {
118
+ # video
119
+ "video": video,
120
+ "video_mask": video_mask,
121
+ # text
122
+ "input_ids": input_ids,
123
+ "input_labels": input_labels,
124
+ "input_mask": input_mask,
125
+ # metadata
126
+ "metadata": (video_id, sentence)
127
+ }
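The slicing in `__init__` above follows the standard MSRVTT partition (6,513 train / 497 validation / 2,990 test clips out of 10,000); a self-contained sketch of the same arithmetic:

```python
# Mirror of the split logic in MSRVTTCaptioningDataset.__init__
# (MSRVTT video IDs are "video0" ... "video9999").
video_ids = [f"video{i}" for i in range(10000)]
all_split_video_ids = {
    "train": video_ids[:6513],
    "val": video_ids[6513:6513 + 497],
    "test": video_ids[6513 + 497:],
}
print({k: len(v) for k, v in all_split_video_ids.items()})
# {'train': 6513, 'val': 497, 'test': 2990}
```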
CoCap/cocap/data/datasets/compressed_video/dataset_msvd.py ADDED
@@ -0,0 +1,118 @@
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 2022/11/17 16:54
3
+ # @Author : Yaojie Shen
4
+ # @Project : CoCap
5
+ # @File : dataset_msvd.py
6
+
7
+ import os
8
+ import random
9
+ from collections import defaultdict
10
+ from typing import Literal
11
+
12
+ import torch
13
+ from torch.utils import data
14
+ from torchvision import transforms
15
+
16
+ from cocap.modules.clip import clip
17
+ from cocap.utils.json import load_json
18
+ from .transforms import (DictNormalize, DictCenterCrop, DictRandomHorizontalFlip)
19
+ from .video_readers import VIDEO_READER_REGISTRY
20
+ from .video_text_base import get_video, CVConfig
21
+
22
+
23
+ class MSVDCaptioningDataset(data.Dataset):
24
+ def __init__(
25
+ self,
26
+ video_root: str,
27
+ max_words: int,
28
+ max_frames: int,
29
+ unfold_sentences: bool,
30
+ video_size: tuple[int, int],
31
+ metadata: str,
32
+ video_reader: str,
33
+ cv_config: CVConfig,
34
+ split: Literal["train", "test"],
35
+ ):
36
+ self.split = split
37
+ self.video_root = video_root
38
+ self.max_words = max_words
39
+ self.max_frames = max_frames
40
+ self.unfold_sentences = unfold_sentences # only affect the train split
41
+ self.height, self.width = video_size
42
+ self.sentences = [] # (vid, [sentence, ...])
43
+ self.h265_cfg = cv_config
44
+ metadata = load_json(metadata)
45
+
46
+ split_video_ids = metadata[split].copy()
47
+ if self.unfold_sentences:
48
+ for item in metadata["metadata"]:
49
+ if item["video_id"] in split_video_ids:
50
+ self.sentences.append([item["video_id"], [item["sentence"]]])
51
+ if split == "test":
52
+ split_video_ids.remove(item["video_id"])
53
+ else:
54
+ vid2sentence = defaultdict(list)
55
+ for item in metadata["metadata"]:
56
+ if item["video_id"] in split_video_ids:
57
+ vid2sentence[item["video_id"]].append(item["sentence"])
58
+ self.sentences = list(vid2sentence.items())
59
+
60
+ # self.sentences = self.sentences[:50000]
61
+ self.video_reader = VIDEO_READER_REGISTRY.get(video_reader)
62
+ # transforms
63
+ normalize = DictNormalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
64
+ if split == "train":
65
+ self.transform = transforms.Compose([
66
+ DictCenterCrop((self.height, self.width)),
67
+ DictRandomHorizontalFlip(),
68
+ normalize
69
+ ])
70
+ elif split == "test":
71
+ self.transform = transforms.Compose([
72
+ DictCenterCrop((self.height, self.width)),
73
+ normalize
74
+ ])
75
+ else:
76
+ raise NotImplementedError
77
+
78
+ if split == "test":
79
+ json_ref = {k: [] for k in metadata[split]}
80
+ for sentence in metadata["metadata"]:
81
+ if sentence["video_id"] in json_ref:
82
+ json_ref[sentence["video_id"]].append(sentence["sentence"])
83
+ self.json_ref = json_ref
84
+
85
+ def __len__(self):
86
+ return len(self.sentences)
87
+
88
+ def _get_video(self, video_id):
89
+ video, video_mask = get_video(video_reader=self.video_reader,
90
+ video_path=os.path.join(self.video_root, f"{video_id}.mp4"),
91
+ max_frames=self.max_frames,
92
+ sample="rand" if self.split == "train" else "uniform",
93
+ hevc_config=self.h265_cfg)
94
+ if self.transform is not None:
95
+ video = self.transform(video)
96
+ return video, video_mask
97
+
98
+ def __getitem__(self, idx):
99
+ video_id, sentence_list = self.sentences[idx]
100
+ sentence = random.choice(sentence_list)
101
+
102
+ input_ids = clip.tokenize(sentence, context_length=self.max_words, truncate=True)[0]
103
+ input_mask = torch.zeros(self.max_words, dtype=torch.long)
104
+ input_mask[:len(clip._tokenizer.encode(sentence)) + 2] = 1
105
+
106
+ video, video_mask = self._get_video(video_id)
107
+ input_labels = torch.cat((input_ids[1:], torch.IntTensor([0])))
108
+ return {
109
+ # video
110
+ "video": video,
111
+ "video_mask": video_mask,
112
+ # text
113
+ "input_ids": input_ids,
114
+ "input_labels": input_labels,
115
+ "input_mask": input_mask,
116
+ # metadata
117
+ "metadata": (video_id, sentence)
118
+ }
CoCap/cocap/data/datasets/compressed_video/dataset_vatex.py ADDED
@@ -0,0 +1,127 @@
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 2022/11/17 16:54
3
+ # @Author : Yaojie Shen
4
+ # @Project : CoCap
5
+ # @File : dataset_vatex.py
6
+
7
+ import json
8
+ import os
9
+ import random
10
+ from collections import defaultdict
11
+ from typing import Literal
12
+
13
+ import torch
14
+ from torch.utils import data
15
+ from torchvision import transforms
16
+
17
+ from cocap.modules.clip import clip
18
+ from .transforms import (DictNormalize, DictCenterCrop, DictRandomHorizontalFlip)
19
+ from .video_readers import VIDEO_READER_REGISTRY
20
+ from .video_text_base import get_video, CVConfig
21
+
22
+
23
+ def load_json(file_path):
24
+ with open(file_path, "r") as f:
25
+ return json.load(f)
26
+
27
+
28
+ class VATEXCaptioningDataset(data.Dataset):
29
+
30
+ def __init__(
31
+ self,
32
+ video_root: str,
33
+ max_words: int,
34
+ max_frames: int,
35
+ unfold_sentences: bool,
36
+ video_size: tuple[int, int],
37
+ metadata: str,
38
+ video_reader: str,
39
+ cv_config: CVConfig,
40
+ split: Literal["train", "test"],
41
+ ):
42
+ self.split = split
43
+ self.video_root = video_root
44
+ self.max_words = max_words
45
+ self.max_frames = max_frames
46
+ self.unfold_sentences = unfold_sentences # only affect the train split
47
+ self.height, self.width = video_size
48
+ self.sentences = [] # (vid, [sentence, ...])
49
+ self.h265_cfg = cv_config
50
+ metadata = load_json(metadata)
51
+
52
+ split_video_ids = metadata[split].copy()
53
+ if self.unfold_sentences:
54
+ for item in metadata["metadata"]:
55
+ if item["video_id"] in split_video_ids:
56
+ self.sentences.append([item["video_id"], [item["sentence"]]])
57
+ if split == "test":
58
+ split_video_ids.remove(item["video_id"])
59
+ else:
60
+ vid2sentence = defaultdict(list)
61
+ for item in metadata["metadata"]:
62
+ if item["video_id"] in split_video_ids:
63
+ vid2sentence[item["video_id"]].append(item["sentence"])
64
+ self.sentences = list(vid2sentence.items())
65
+
66
+ # self.sentences = self.sentences[:50000]
67
+ self.video_reader = VIDEO_READER_REGISTRY.get(video_reader)
68
+ # transforms
69
+ normalize = DictNormalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
70
+ if split == "train":
71
+ self.transform = transforms.Compose([
72
+ DictCenterCrop((self.height, self.width)),
73
+ DictRandomHorizontalFlip(),
74
+ normalize
75
+ ])
76
+ elif split == "test":
77
+ self.transform = transforms.Compose([
78
+ DictCenterCrop((self.height, self.width)),
79
+ normalize
80
+ ])
81
+ else:
82
+ raise NotImplementedError
83
+
84
+ if split == "test":
85
+ json_ref = {k: [] for k in metadata[split]}
86
+ for sentence in metadata["metadata"]:
87
+ if sentence["video_id"] in json_ref:
88
+ json_ref[sentence["video_id"]].append(sentence["sentence"])
89
+ self.json_ref = json_ref
90
+
91
+ def __len__(self):
92
+ return len(self.sentences)
93
+
94
+ def _get_video_path(self, video_id):
95
+ return os.path.join(self.video_root, f"{video_id}.mp4")
96
+
97
+ def _get_video(self, video_id):
98
+ video, video_mask = get_video(video_reader=self.video_reader,
99
+ video_path=self._get_video_path(video_id),
100
+ max_frames=self.max_frames,
101
+ sample="rand" if self.split == "train" else "uniform",
102
+ hevc_config=self.h265_cfg)
103
+ if self.transform is not None:
104
+ video = self.transform(video)
105
+ return video, video_mask
106
+
107
+ def __getitem__(self, idx):
108
+ video_id, sentence_list = self.sentences[idx]
109
+ sentence = random.choice(sentence_list)
110
+
111
+ input_ids = clip.tokenize(sentence, context_length=self.max_words, truncate=True)[0]
112
+ input_mask = torch.zeros(self.max_words, dtype=torch.long)
113
+ input_mask[:len(clip._tokenizer.encode(sentence)) + 2] = 1
114
+
115
+ video, video_mask = self._get_video(video_id)
116
+ input_labels = torch.cat((input_ids[1:], torch.IntTensor([0])))
117
+ return {
118
+ # video
119
+ "video": video,
120
+ "video_mask": video_mask,
121
+ # text
122
+ "input_ids": input_ids,
123
+ "input_labels": input_labels,
124
+ "input_mask": input_mask,
125
+ # metadata
126
+ "metadata": (video_id, sentence)
127
+ }
CoCap/cocap/data/datasets/compressed_video/transforms.py ADDED
@@ -0,0 +1,184 @@
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 2022/11/7 14:51
3
+ # @Author : Yaojie Shen
4
+ # @Project : CoCap
5
+ # @File : transforms.py
6
+
7
+ from typing import Sequence
8
+
9
+ import einops
10
+ import torch
11
+ import torchvision.transforms.functional as F
12
+ from torchvision import transforms
13
+
14
+ __all__ = ["DictRandomResizedCrop", "DictCenterCrop", "DictResize", "DictNormalize"]
15
+
16
+ """
17
+ Custom transforms
18
+ """
19
+
20
+ IMAGE_KEYS = ["iframe", "residual", "motion_vector"]
21
+ NORMALIZE_KEYS = ["iframe"]
22
+
23
+
24
+ # iframe: ..., c, h, w
25
+ # motion vector: n_gop, n_mv, c, h, w
26
+ # residual: n_gop, n_res, c, h, w
27
+
28
+
29
+ class DictRandomResizedCrop(transforms.RandomResizedCrop):
30
+ """apply same random resized crop to the items in dict"""
31
+
32
+ def forward(self, img: dict):
33
+ assert any(k in img for k in IMAGE_KEYS), "input does not contain any valid image"
34
+ for k in IMAGE_KEYS:
35
+ if k in img:
36
+ i, j, h, w = self.get_params(img[k], self.scale, self.ratio)
37
+ break
38
+
39
+ ret = {}
40
+ for k, v in img.items():
41
+ if k not in IMAGE_KEYS:
42
+ ret[k] = v
43
+ continue
44
+ if len(v.shape) == 5: # handle extra dimension for GOP
45
+ num_gop = v.size(0)
46
+ num_frame = v.size(1)
47
+ v = einops.rearrange(v, "num_gop num_frame c h w->(num_gop num_frame) c h w")
48
+ if k == "motion_vector":
49
+ if isinstance(self.size, Sequence) and len(self.size) == 2:
50
+ mv_size = (self.size[0] // 4, self.size[1] // 4)
51
+ elif isinstance(self.size, int):
52
+ mv_size = self.size // 4
53
+ else:
54
+ raise ValueError("Image size is not supported: {}".format(self.size))
55
+ v = torch.stack(
56
+ [F.resized_crop(v[..., c, :, :], i // 4, j // 4, h // 4, w // 4, mv_size,
57
+ F.InterpolationMode.NEAREST)
58
+ for c in range(v.size(-3))],  # `c` avoids shadowing the crop offset `i`
59
+ dim=-3
60
+ )
61
+ else:
62
+ v = F.resized_crop(v, i, j, h, w, self.size, self.interpolation)
63
+ v = einops.rearrange(v, "(num_gop num_frame) c h w->num_gop num_frame c h w",
64
+ num_gop=num_gop, num_frame=num_frame)
65
+ ret[k] = v
66
+ else:
67
+ ret[k] = F.resized_crop(v, i, j, h, w, self.size, self.interpolation)
68
+ return ret
69
+
70
+
71
+ class DictRandomCrop(transforms.RandomCrop):
72
+
73
+ def forward(self, img: dict):
74
+ assert self.padding is None and not self.pad_if_needed, "Padding is not supported by DictRandomCrop"
75
+
76
+ assert any(k in img for k in IMAGE_KEYS), "input does not contain any valid image"
77
+ for k in IMAGE_KEYS:
78
+ if k in img:
79
+ i, j, h, w = self.get_params(img[k], self.size)
80
+ break
81
+
82
+ ret = {}
83
+ for k, v in img.items():
84
+ if k not in IMAGE_KEYS:
85
+ ret[k] = v
86
+ continue
87
+ if len(v.shape) == 5: # handle extra dimension for GOP
88
+ num_gop = v.size(0)
89
+ num_frame = v.size(1)
90
+ v = einops.rearrange(v, "num_gop num_frame c h w->(num_gop num_frame) c h w")
91
+ if k == "motion_vector":
92
+ v = torch.stack(
93
+ [F.crop(v[..., c, :, :], i // 4, j // 4, h // 4, w // 4) for c in range(v.size(-3))],  # `c` avoids shadowing the crop offset `i`
94
+ dim=-3
95
+ )
96
+ else:
97
+ v = F.crop(v, i, j, h, w)
98
+ v = einops.rearrange(v, "(num_gop num_frame) c h w->num_gop num_frame c h w",
99
+ num_gop=num_gop, num_frame=num_frame)
100
+ ret[k] = v
101
+ else:
102
+ ret[k] = F.crop(v, i, j, h, w)
103
+ return ret
104
+
105
+
106
+ class DictCenterCrop(transforms.CenterCrop):
107
+ def forward(self, img: dict):
108
+ ret = {}
109
+ for k, v in img.items():
110
+ if k not in IMAGE_KEYS:
111
+ ret[k] = v
112
+ continue
113
+ if len(v.shape) == 5: # handle extra dimension for GOP
114
+ num_gop = v.size(0)
115
+ num_frame = v.size(1)
116
+ v = einops.rearrange(v, "num_gop num_frame c h w->(num_gop num_frame) c h w")
117
+ if k == "motion_vector":
118
+ if isinstance(self.size, Sequence) and len(self.size) == 2:
119
+ mv_size = (self.size[0] // 4, self.size[1] // 4)
120
+ elif isinstance(self.size, int):
121
+ mv_size = self.size // 4
122
+ else:
123
+ raise ValueError("Image size is not supported: {}".format(self.size))
124
+ v = torch.stack(
125
+ [F.center_crop(v[..., i, :, :], mv_size)
126
+ for i in range(v.size(-3))],
127
+ dim=-3
128
+ )
129
+ else:
130
+ v = F.center_crop(v, self.size)
131
+ v = einops.rearrange(v, "(num_gop num_frame) c h w->num_gop num_frame c h w",
132
+ num_gop=num_gop, num_frame=num_frame)
133
+ ret[k] = v
134
+ else:
135
+ ret[k] = F.center_crop(v, self.size)
136
+ return ret
137
+
138
+
139
+ class DictResize(transforms.Resize):
140
+
141
+ def forward(self, img: dict):
142
+ ret = {}
143
+ for k, v in img.items():
144
+ if k not in IMAGE_KEYS:
145
+ ret[k] = v
146
+ continue
147
+ if len(v.shape) == 5: # handle extra dimension for GOP
148
+ num_gop = v.size(0)
149
+ num_frame = v.size(1)
150
+ v = einops.rearrange(v, "num_gop num_frame c h w->(num_gop num_frame) c h w")
151
+ if k == "motion_vector":
152
+ if isinstance(self.size, Sequence) and len(self.size) == 2:
153
+ mv_size = (self.size[0] // 4, self.size[1] // 4)
154
+ elif isinstance(self.size, int):
155
+ mv_size = self.size // 4
156
+ else:
157
+ raise ValueError("Image size is not supported: {}".format(self.size))
158
+ v = torch.stack(
159
+ [F.resize(v[..., i, :, :], mv_size, F.InterpolationMode.NEAREST, self.max_size, self.antialias)
160
+ for i in range(v.size(-3))],
161
+ dim=-3
162
+ )
163
+ else:
164
+ v = F.resize(v, self.size, self.interpolation, self.max_size, self.antialias)
165
+ v = einops.rearrange(v, "(num_gop num_frame) c h w->num_gop num_frame c h w",
166
+ num_gop=num_gop, num_frame=num_frame)
167
+ ret[k] = v
168
+ else:
169
+ ret[k] = F.resize(v, self.size, self.interpolation, self.max_size, self.antialias)
170
+ return ret
171
+
172
+
173
+ class DictNormalize(transforms.Normalize):
174
+ def forward(self, data: dict):
175
+ return {k: F.normalize(v, self.mean, self.std, self.inplace) if k in NORMALIZE_KEYS else v
176
+ for k, v in data.items()}
177
+
178
+
179
+ class DictRandomHorizontalFlip(transforms.RandomHorizontalFlip):
180
+ def forward(self, img: dict):
181
+ if torch.rand(1) < self.p:
182
+ return {k: F.hflip(v) if k in IMAGE_KEYS else v for k, v in img.items()}
183
+ else:
184
+ return img
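A small sketch of how the `Dict*` transforms above act on a compressed-video sample: the same spatial transform is applied to every image-like key, motion vectors are handled at 1/4 of the frame resolution, and only `iframe` is normalized. The tensor shapes are illustrative.

```python
import torch
from torchvision import transforms

from cocap.data.datasets.compressed_video.transforms import (
    DictCenterCrop, DictNormalize, DictRandomHorizontalFlip,
)

sample = {
    "iframe": torch.rand(8, 3, 240, 320),                  # (n_gop, c, h, w)
    "motion_vector": torch.rand(8, 15, 4, 60, 80),         # (n_gop, n_mv, c, h/4, w/4)
    "type_ids_mv": torch.zeros(8, 15, dtype=torch.long),   # non-image keys pass through
}

transform = transforms.Compose([
    DictCenterCrop((224, 224)),
    DictRandomHorizontalFlip(),
    DictNormalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
])

out = transform(sample)
print(out["iframe"].shape)         # torch.Size([8, 3, 224, 224])
print(out["motion_vector"].shape)  # torch.Size([8, 15, 4, 56, 56])
```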
CoCap/cocap/data/datasets/compressed_video/video_readers.py ADDED
@@ -0,0 +1,347 @@
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 2022/8/10 14:56
3
+ # @Author : Yaojie Shen
4
+ # @Project : CoCap
5
+ # @File : video_readers.py
6
+
7
+ # based on https://github.com/m-bain/frozen-in-time/blob/main/base/base_dataset.py
8
+
9
+
10
+ import logging
11
+ import pickle
12
+ import random
13
+ import subprocess
14
+ import traceback
15
+ from typing import Dict, Tuple
16
+
17
+ import cv_reader
18
+ import decord
19
+ import lz4.frame
20
+ import numpy as np
21
+ import torch
22
+ import torch.nn.functional
23
+ from fvcore.common.registry import Registry
24
+
25
+ from cocap.utils.profile import Timer
26
+ from .compressed_video_utils import deserialize
27
+
28
+ logger = logging.getLogger(__name__)
29
+
30
+ VIDEO_READER_REGISTRY = Registry("VIDEO_READER")
31
+
32
+
33
+ def sample_frames(num_frames, vlen, sample='rand', fix_start=None):
34
+ # acc_samples = min(num_frames, vlen)
35
+ intervals = np.linspace(start=0, stop=vlen, num=num_frames + 1).astype(int)
36
+ ranges = []
37
+ for idx, interv in enumerate(intervals[:-1]):
38
+ ranges.append((interv, intervals[idx + 1]))
39
+ if sample == 'rand':
40
+ frame_idxs = [random.choice(range(x[0], x[1])) if x[0] != x[1] else x[0] for x in ranges]
41
+ elif fix_start is not None:
42
+ frame_idxs = [x[0] + fix_start for x in ranges]
43
+ elif sample == 'uniform':
44
+ frame_idxs = [(x[0] + x[1]) // 2 for x in ranges]
45
+ else:
46
+ raise NotImplementedError
47
+
48
+ return frame_idxs
49
+
50
+
51
+ @VIDEO_READER_REGISTRY.register()
52
+ def read_frames_cv2(video_path, num_frames, sample='rand', fix_start=None):
53
+ import cv2
54
+
55
+ cap = cv2.VideoCapture(video_path)
56
+ assert (cap.isOpened())
57
+ vlen = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
58
+ # get indexes of sampled frames
59
+ frame_idxs = sample_frames(num_frames, vlen, sample=sample, fix_start=fix_start)
60
+ frames = []
61
+ success_idxs = []
62
+ for index in frame_idxs:
63
+ cap.set(cv2.CAP_PROP_POS_FRAMES, index - 1)
64
+ ret, frame = cap.read()
65
+ if ret:
66
+ frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
67
+ frame = torch.from_numpy(frame)
68
+ # (H x W x C) to (C x H x W)
69
+ frame = frame.permute(2, 0, 1)
70
+ frames.append(frame)
71
+ success_idxs.append(index)
72
+ else:
73
+ pass
74
+ # print(frame_idxs, ' fail ', index, f' (vlen {vlen})')
75
+
76
+ frames = torch.stack(frames).float() / 255
77
+ cap.release()
78
+ return frames, success_idxs
79
+
80
+
81
+ @VIDEO_READER_REGISTRY.register()
82
+ def read_frames_av(video_path, num_frames, sample='rand', fix_start=None):
83
+ import av
84
+
85
+ reader = av.open(video_path)
86
+ try:
87
+ frames = []
88
+ frames = [torch.from_numpy(f.to_rgb().to_ndarray()) for f in reader.decode(video=0)]
89
+ except (RuntimeError, ZeroDivisionError) as exception:
90
+ print('{}: WEBM reader cannot open {}. Empty '
91
+ 'list returned.'.format(type(exception).__name__, video_path))
92
+ vlen = len(frames)
93
+ frame_idxs = sample_frames(num_frames, vlen, sample=sample, fix_start=fix_start)
94
+ frames = torch.stack([frames[idx] for idx in frame_idxs]).float() / 255
95
+ frames = frames.permute(0, 3, 1, 2)
96
+ return frames, frame_idxs
97
+
98
+
99
+ @VIDEO_READER_REGISTRY.register()
100
+ def read_frames_decord(video_path, num_frames, sample='rand', fix_start=None):
101
+ import decord
102
+ decord.bridge.set_bridge("torch")
103
+
104
+ video_reader = decord.VideoReader(video_path, num_threads=1)
105
+ vlen = len(video_reader)
106
+ frame_idxs = sample_frames(num_frames, vlen, sample=sample, fix_start=fix_start)
107
+ frames = video_reader.get_batch(frame_idxs)
108
+ frames = frames.float() / 255
109
+ frames = frames.permute(0, 3, 1, 2)
110
+ return frames, frame_idxs
111
+
112
+
113
+ def get_video_size(video_path):
114
+ import cv2
115
+
116
+ vcap = cv2.VideoCapture(video_path) # 0=camera
117
+ if vcap.isOpened():
118
+ width = vcap.get(cv2.CAP_PROP_FRAME_WIDTH) # float `width`
119
+ height = vcap.get(cv2.CAP_PROP_FRAME_HEIGHT) # float `height`
120
+ return int(width), int(height)
121
+ else:
122
+ raise RuntimeError(f"VideoCapture cannot open video file: {video_path}")
123
+
124
+
125
+ def get_frame_type(video_path):
126
+ command = '/usr/bin/ffprobe -v error -show_entries frame=pict_type -of default=noprint_wrappers=1'.split()
127
+ out = subprocess.check_output(command + [video_path]).decode()
128
+ frame_types = out.replace('pict_type=', '').split()
129
+ return frame_types
130
+
131
+
132
+ def pad_tensor(tensor: torch.Tensor, target_size: int, dim: int, pad_value=0):
133
+ pad_shape = list(tensor.shape)
134
+ pad_shape[dim] = target_size - pad_shape[dim]
135
+ return torch.concat([tensor, torch.full(pad_shape, device=tensor.device, dtype=tensor.dtype, fill_value=pad_value)],
136
+ dim=dim)
137
+
138
+
139
+ @VIDEO_READER_REGISTRY.register()
140
+ def read_frames_compressed_domain(
141
+ video_path: str,
142
+ resample_num_gop: int, resample_num_mv: int, resample_num_res: int,
143
+ with_residual: bool = False, with_bp_rgb: bool = False, pre_extract: bool = False,
144
+ sample: str = "rand"
145
+ ) -> Tuple[Dict[str, torch.Tensor], bool]:
146
+ """
147
+ This function processes the output of `cv_reader` to obtain the inputs for training
148
+ :param video_path: path to the video
149
+ :param resample_num_gop: number of GOP sampled from the video
150
+ :param resample_num_mv: number of motion vectors sampled from each GOP
151
+ :param resample_num_res: number of residuals sampled from each GOP
152
+ :param with_residual: whether to return residual
153
+ :param with_bp_rgb: also return the decoded RGB frames of the video
154
+ :param pre_extract: use pre-extracted data
155
+ :param sample: sample method
156
+ :return:
157
+ """
158
+ decord.bridge.set_bridge("torch")
159
+ assert sample in {"rand", "uniform", "pad"}
160
+ try:
161
+ timer = Timer()
162
+ reader = decord.VideoReader(video_path, num_threads=1)
163
+ timer("check_video_length")
164
+ # load data from video file/pre-extracted feature
165
+ if not pre_extract:
166
+ reader_ret = cv_reader.read_video(video_path)
167
+ timer("cv_reader")
168
+ else:
169
+ data = {}
170
+ read_type = ["pict_type", "rgb_gop"]
171
+ if with_residual:
172
+ read_type += ["residual"]
173
+ if with_bp_rgb:
174
+ read_type += ["rgb_full"]
175
+ else:
176
+ read_type += ["motion_vector"]
177
+ for t in read_type:
178
+ if t in ['motion_vector', 'rgb_full', 'residual']:
179
+ with lz4.frame.open(f"{video_path}.{t}", "rb") as f:
180
+ data.update(pickle.load(f))
181
+ else:
182
+ with open(f"{video_path}.{t}", "rb") as f:
183
+ data.update(pickle.load(f))
184
+ timer("read")
185
+ data = deserialize(data)
186
+ timer("deserialize")
187
+ reader_ret = [{} for _ in range(len(data["pict_type"]))]
188
+ for k, v_list in data.items():
189
+ k = "rgb" if k == "rgb_full" else k # replace rgb_full with rgb
190
+ if k == "rgb_gop":
191
+ idx_iframe = 0
192
+ for i, t in enumerate(data["pict_type"]):
193
+ if t == "I":
194
+ reader_ret[i]["rgb"] = v_list[idx_iframe]
195
+ idx_iframe += 1
196
+ assert idx_iframe == len(v_list)
197
+ else:
198
+ for i, v in enumerate(v_list):
199
+ reader_ret[i][k] = v
200
+ timer("format")
201
+ full_frame_gop = []
202
+ for f in reader_ret:
203
+ if f["pict_type"] == "I":
204
+ full_frame_gop.append([f, ])
205
+ else:
206
+ full_frame_gop[-1].append(f)
207
+ full_frame_gop = [g for g in full_frame_gop if len(g) > 2] # remove gop which do not contain any B/P-frame
208
+ # sample B/P-frame for each gop
209
+ i_frame_gop = []
210
+ mv_frame_gop = []
211
+ res_frame_gop = []
212
+ for gop_idx in range(len(full_frame_gop)):
213
+ i_frame_gop.append(full_frame_gop[gop_idx][0])
214
+ if sample == "pad":
215
+ mv_frame_gop.append(full_frame_gop[gop_idx][1: 1 + resample_num_mv])
216
+ # I-frame is not included in residual, although I-frame also contains valid residual
217
+ res_frame_gop.append(full_frame_gop[gop_idx][1:1 + resample_num_res])
218
+ else:
219
+ idxs = sample_frames(num_frames=resample_num_mv, vlen=len(full_frame_gop[gop_idx]) - 1, sample="rand")
220
+ mv_frame_gop.append([full_frame_gop[gop_idx][i + 1] for i in idxs])
221
+ idxs = sample_frames(num_frames=resample_num_res, vlen=len(full_frame_gop[gop_idx]) - 1, sample="rand")
222
+ res_frame_gop.append([full_frame_gop[gop_idx][1 + i] for i in idxs])
223
+ # sample gop
224
+ if sample == "pad":
225
+ i_frame_gop = i_frame_gop[:resample_num_gop]
226
+ mv_frame_gop = mv_frame_gop[:resample_num_gop]
227
+ res_frame_gop = res_frame_gop[:resample_num_gop]
228
+ else:
229
+ idxs = sample_frames(num_frames=resample_num_gop, vlen=len(mv_frame_gop), sample=sample)
230
+ i_frame_gop = [i_frame_gop[i] for i in idxs]
231
+ mv_frame_gop = [mv_frame_gop[i] for i in idxs]
232
+ res_frame_gop = [res_frame_gop[i] for i in idxs]
233
+ timer("sample")
234
+ # stack iframe
235
+ if with_bp_rgb or pre_extract:
236
+ iframe = [cur_gop["rgb"] for cur_gop in i_frame_gop]
237
+ iframe = torch.stack([torch.from_numpy(f) for f in iframe]).permute(0, 3, 1, 2) / 255
238
+ else:
239
+ iframe_idx = [cur_gop["frame_idx"] for cur_gop in i_frame_gop]
240
+ iframe = reader.get_batch(iframe_idx).permute(0, 3, 1, 2) / 255
241
+ input_mask_gop = torch.tensor([0] * iframe.size(0) + [1] * (resample_num_gop - iframe.size(0)),
242
+ dtype=torch.bool)
243
+ if sample == "pad" and iframe.size(0) < resample_num_gop:
244
+ iframe = pad_tensor(iframe, target_size=resample_num_gop, dim=0)
245
+ assert iframe.size(0) == resample_num_gop
246
+ timer("stack_iframe")
247
+ # encode motion
248
+ assert mv_frame_gop[0][0]["motion_vector"].shape[-1] == 4, \
249
+ "format is avc, but motion vector has {} !=4 dims".format(mv_frame_gop[0][0]["motion_vector"].shape[-1])
250
+ # h264 motion vector
251
+ for g in mv_frame_gop:
252
+ for f in g:
253
+ if "encoded" in f and f["encoded"]:
254
+ continue
255
+ else:
256
+ f["motion_vector"] = torch.from_numpy(
257
+ f["motion_vector"].transpose((2, 0, 1)).astype(np.float32)
258
+ )
259
+ f["encoded"] = True
260
+ timer("encode_motion")
261
+ # stack mv
262
+ motion_vector = []
263
+ type_ids_mv = []
264
+ input_mask_mv = []
265
+ for gop_idx in range(len(mv_frame_gop)):
266
+ gop_mv = torch.stack([f["motion_vector"] for f in mv_frame_gop[gop_idx]])
267
+ input_mask_mv.append(torch.tensor([0] * gop_mv.size(0) + [1] * (resample_num_mv - gop_mv.size(0)),
268
+ dtype=torch.bool))
269
+ type_ids_mv.append(torch.tensor([0 if f["pict_type"] == "P" else 1 for f in mv_frame_gop[gop_idx]] +
270
+ [2] * (resample_num_mv - gop_mv.size(0)), dtype=torch.long))
271
+ if sample == "pad" and gop_mv.size(0) < resample_num_mv: # pad for mv in each gop
272
+ gop_mv = pad_tensor(gop_mv, target_size=resample_num_mv, dim=0)
273
+ assert gop_mv.size(0) == resample_num_mv
274
+ motion_vector.append(gop_mv)
275
+ motion_vector = torch.stack(motion_vector)
276
+ type_ids_mv = torch.stack(type_ids_mv)
277
+ input_mask_mv = torch.stack(input_mask_mv)
278
+ # pad for gop
279
+ if sample == "pad" and motion_vector.size(0) < resample_num_gop: # pad for gop number
280
+ motion_vector = pad_tensor(motion_vector, target_size=resample_num_gop, dim=0)
281
+ type_ids_mv = pad_tensor(type_ids_mv, target_size=resample_num_gop, dim=0, pad_value=2)
282
+ input_mask_mv = pad_tensor(input_mask_mv, target_size=resample_num_gop, dim=0, pad_value=1)
283
+ assert motion_vector.size(0) == resample_num_gop, \
284
+ "motion vector gop number is not correct, got {}, expect {}".format(motion_vector.size(0), resample_num_gop)
285
+ assert motion_vector.size(1) == resample_num_mv, \
286
+ "motion vector mv number is not correct, got {}, expect {}".format(motion_vector.size(1), resample_num_mv)
287
+ timer("stack_motion")
288
+ ret = {"iframe": iframe, "motion_vector": motion_vector,
289
+ "input_mask_gop": input_mask_gop, "input_mask_mv": input_mask_mv, "type_ids_mv": type_ids_mv}
290
+ if with_residual:
291
+ residual = []
292
+ input_mask_res = []
293
+ for gop_idx in range(len(mv_frame_gop)):
294
+ gop_res = torch.stack(
295
+ [torch.from_numpy(f["residual"].transpose(2, 0, 1)) for f in res_frame_gop[gop_idx]]
296
+ )
297
+ input_mask_res.append(torch.tensor([0] * gop_res.size(0) + [1] * (resample_num_res - gop_res.size(0)),
298
+ dtype=torch.bool))
299
+ if sample == "pad" and gop_res.size(0) < resample_num_res:
300
+ gop_res = pad_tensor(gop_res, target_size=resample_num_res, dim=0)
301
+ residual.append(gop_res)
302
+ residual = torch.stack(residual)
303
+ input_mask_res = torch.stack(input_mask_res)
304
+ if sample == "pad" and residual.size(0) < resample_num_gop: # pad for gop number
305
+ residual = pad_tensor(residual, target_size=resample_num_gop, dim=0, pad_value=128)
306
+ input_mask_res = pad_tensor(input_mask_res, target_size=resample_num_gop, dim=0, pad_value=1)
307
+ ret["residual"] = residual
308
+ ret["input_mask_res"] = input_mask_res
309
+ timer("stack_residual")
310
+ if with_bp_rgb:
311
+ # stack B/P-frame RGB
312
+ bp_rgb = torch.stack(
313
+ [torch.stack([torch.from_numpy(f["rgb"]).permute(2, 0, 1) for f in g])
314
+ for g in mv_frame_gop]) / 255
315
+ ret["bp_rgb"] = bp_rgb
316
+ timer("stack_bp_rgb")
317
+ logger.debug(timer.get_info(averaged=False)) # debug output about speed
318
+ return ret, True
319
+ except Exception: # TODO: too broad exception
320
+ print(f"video load error: {video_path}")
321
+ traceback.print_exc()
322
+ traceback.print_exc(file=open("video_reader_error.log", "a"))
323
+ # create a dummy return data
324
+ ret = {
325
+ "iframe": torch.zeros((resample_num_gop, 3, 224, 224), dtype=torch.float),
326
+ "motion_vector": torch.zeros((resample_num_gop, resample_num_mv, 4, 56, 56), dtype=torch.float),
327
+ "input_mask_gop": torch.ones((resample_num_gop,), dtype=torch.bool),
328
+ "input_mask_mv": torch.ones((resample_num_gop, resample_num_mv), dtype=torch.bool),
329
+ "input_mask_res": torch.ones((resample_num_gop, resample_num_mv), dtype=torch.bool),
330
+ "type_ids_mv": torch.zeros((resample_num_gop, resample_num_mv), dtype=torch.long)
331
+ }
332
+ if with_residual:
333
+ ret["residual"] = torch.zeros((resample_num_gop, resample_num_res, 3, 224, 224), dtype=torch.uint8)
334
+ if with_bp_rgb:
335
+ ret["bp_rgb"] = torch.zeros((resample_num_gop, resample_num_mv, 3, 224, 224), dtype=torch.float)
336
+ return ret, False
337
+
338
+
339
+ def get_video_len(video_path):
340
+ import cv2
341
+
342
+ cap = cv2.VideoCapture(video_path)
343
+ if not (cap.isOpened()):
344
+ return False
345
+ vlen = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
346
+ cap.release()
347
+ return vlen
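+
+ # Sketch of how a caller might consume the compressed-domain reader output
+ # (shapes follow the dummy fallback tensors above; hypothetical sampling sizes):
+ #   data, ok = read_frames_compressed_domain(path, resample_num_gop=8, resample_num_mv=59)
+ #   data["iframe"]        -> (num_gop, 3, 224, 224) RGB key frames
+ #   data["motion_vector"] -> (num_gop, num_mv, 4, 56, 56) motion vectors
+ #   data["input_mask_gop"], data["input_mask_mv"] -> boolean padding masks (True = padded)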
CoCap/cocap/data/datasets/compressed_video/video_text_base.py ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 2022/12/3 14:54
3
+ # @Author : Yaojie Shen
4
+ # @Project : CoCap
5
+ # @File : video_text_base.py
6
+ import logging
7
+ import os.path
8
+ import random
9
+ from dataclasses import dataclass
10
+
11
+ import torch
12
+
13
+ logger = logging.getLogger(__name__)
14
+
15
+
16
+ def get_tokenized_words(sentence: str, tokenizer, max_words):
17
+ words = tokenizer.tokenize(sentence)
18
+ words = ["[CLS]"] + words
19
+ total_length_with_cls = max_words - 1
20
+ if len(words) > total_length_with_cls:
21
+ words = words[:total_length_with_cls]
22
+ words = words + ["[SEP]"]
23
+ return words
24
+
25
+
26
+ def get_text_inputs(sentence: str, tokenizer, max_words):
27
+ """
28
+ 1. tokenize
29
+ 2. add [CLS] and [SEP] token, limit the length
30
+ 3. create mask and token type
31
+ 4. pad to max_words
32
+ :param sentence:
33
+ :param tokenizer:
34
+ :param max_words:
35
+ :return: 1 dim tensor, shape is (max_words,)
36
+ """
37
+ words = get_tokenized_words(sentence, tokenizer, max_words)
38
+ input_ids = tokenizer.convert_tokens_to_ids(words)
39
+ input_mask = [1] * len(input_ids) # 1 is keep, 0 is mask out
40
+ segment_ids = [0] * len(input_ids)
41
+ while len(input_ids) < max_words:
42
+ input_ids.append(0)
43
+ input_mask.append(0)
44
+ segment_ids.append(0)
45
+ assert len(input_ids) == len(input_mask) == len(segment_ids) == max_words
46
+ return torch.tensor(input_ids), torch.tensor(input_mask), torch.tensor(segment_ids)
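+ # Illustrative example (hypothetical tokenizer output) with max_words = 8:
+ #   tokens:      [CLS]  a  dog  runs  [SEP]  PAD  PAD  PAD
+ #   input_mask:    1    1   1    1      1     0    0    0
+ #   segment_ids:   0    0   0    0      0     0    0    0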
47
+
48
+
49
+ def get_text_inputs_with_mlm(sentence: str, tokenizer, max_words):
50
+ """
51
+ Add mlm inputs and labels based on `get_text_inputs`
52
+ :param sentence:
53
+ :param tokenizer:
54
+ :param max_words:
55
+ :return: 1 dim tensor, shape is (max_words,)
56
+ """
57
+ input_ids, input_mask, segment_ids = get_text_inputs(sentence, tokenizer, max_words)
58
+
59
+ # Mask Language Model <-----
60
+ token_labels = []
61
+ masked_tokens = get_tokenized_words(sentence, tokenizer, max_words)
62
+ for token_id, token in enumerate(masked_tokens):
63
+ if token_id == 0 or token_id == len(masked_tokens) - 1:
64
+ token_labels.append(-1)
65
+ continue
66
+ prob = random.random()
67
+ # mask token with 15% probability
68
+ if prob < 0.15:
69
+ prob /= 0.15
70
+ # 80% randomly change token to mask token
71
+ if prob < 0.8:
72
+ masked_tokens[token_id] = "[MASK]"
73
+ # 10% randomly change token to random token
74
+ elif prob < 0.9:
75
+ masked_tokens[token_id] = random.choice(list(tokenizer.vocab.items()))[0]
76
+ # -> rest 10% randomly keep current token
77
+ # append current token to output (we will predict these later)
78
+ try:
79
+ token_labels.append(tokenizer.vocab[token])
80
+ except KeyError:
81
+ # For unknown words (should not occur with BPE vocab)
82
+ token_labels.append(tokenizer.vocab["[UNK]"])
83
+ logger.debug("Cannot find token '{}' in vocab. Using [UNK] instead".format(token))
84
+ else:
85
+ # no masking token (will be ignored by loss function later)
86
+ token_labels.append(-1)
87
+ # -----> Mask Language Model
88
+ masked_token_ids = tokenizer.convert_tokens_to_ids(masked_tokens)
89
+
90
+ while len(masked_token_ids) < max_words:
91
+ masked_token_ids.append(0)
92
+ token_labels.append(-1)
93
+ assert len(masked_token_ids) == len(token_labels) == max_words
94
+ return input_ids, input_mask, segment_ids, torch.tensor(masked_token_ids), torch.tensor(token_labels)
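+ # Illustrative behaviour of the masking scheme above: each non-special token is
+ # selected with probability 0.15; a selected token becomes [MASK] 80% of the time,
+ # a random vocabulary token 10% of the time, and is kept unchanged 10% of the time,
+ # while its original id is recorded in token_labels (-1 everywhere else).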
95
+
96
+
97
+ @dataclass
98
+ class CVConfig:
99
+ num_gop: int
100
+ num_mv: int
101
+ num_res: int
102
+ with_residual: bool
103
+ use_pre_extract: bool
104
+ sample: str
105
+
106
+
107
+ def get_video(video_reader, video_path, max_frames, sample, hevc_config: None | CVConfig = None):
108
+ assert os.path.exists(video_path), f"Video file not found: {video_path}"
109
+ video_mask = torch.ones((max_frames,), dtype=torch.int)
110
+ if video_reader.__name__ in ["read_frames_compressed_domain"]:
111
+ assert hevc_config is not None, "hevc_config should be set when using read_frames_compressed_domain"
112
+ video, _ = video_reader(video_path,
113
+ resample_num_gop=hevc_config.num_gop, resample_num_mv=hevc_config.num_mv,
114
+ resample_num_res=hevc_config.num_res,
115
+ with_residual=hevc_config.with_residual,
116
+ pre_extract=hevc_config.use_pre_extract,
117
+ sample=hevc_config.sample if hevc_config.sample == "pad" else sample)
118
+ else:
119
+ video, _ = video_reader(video_path, max_frames, sample)
120
+ return video, video_mask
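+
+ # Minimal usage sketch (hypothetical sampling sizes, assuming the compressed-domain
+ # reader from video_readers.py):
+ #   cfg = CVConfig(num_gop=8, num_mv=59, num_res=59, with_residual=True,
+ #                  use_pre_extract=False, sample="pad")
+ #   video, video_mask = get_video(read_frames_compressed_domain, "/path/to/video.mp4",
+ #                                 max_frames=8, sample="rand", hevc_config=cfg)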
CoCap/cocap/modeling/__init__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 2022/11/11 16:24
3
+ # @Author : Yaojie Shen
4
+ # @Project : CoCap
5
+ # @File : __init__.py
CoCap/cocap/modeling/av_captioner.py ADDED
@@ -0,0 +1,219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ from cocap.modules.clip.model import ModifiedResNet
4
+ from cocap.modules.audio_encoder import CNN14, BEATsAudioEncoder
5
+
6
+ class AVCaptioner(nn.Module):
7
+ def __init__(
8
+ self,
9
+ embed_dim=1024,
10
+ vision_layers=[3, 4, 6, 3],
11
+ vision_width=64,
12
+ vision_heads=32,
13
+ audio_pretrained=False,
14
+ vocab_size=49408,
15
+ max_len=77,
16
+ num_decoder_layers=6,
17
+ num_heads=8,
18
+ dim_feedforward=2048,
19
+ dropout=0.1,
20
+ clip_path=None,
21
+ audio_path=None,
22
+ audio_enc_type="beats" # "cnn14" or "beats"
23
+ ):
24
+ super().__init__()
25
+
26
+ # Visual Encoder
27
+ self.visual_encoder = ModifiedResNet(
28
+ layers=vision_layers,
29
+ output_dim=1024,
30
+ heads=vision_heads,
31
+ input_resolution=224,
32
+ width=vision_width
33
+ )
34
+
35
+ embed_dim = 1024
36
+
37
+ # Audio Encoder
38
+ if audio_enc_type == "beats":
39
+ self.audio_encoder = BEATsAudioEncoder(model_path=audio_path)
40
+ # BEATs Base outputs 768-dim features, while BEATs Large outputs 1024.
41
+ # Default to Base (e.g. the standard iter3+ AS2M checkpoint), then read the
42
+ # actual dimension from the loaded checkpoint config when it is available
43
+ # via self.audio_encoder.cfg.encoder_embed_dim.
44
+ audio_dim = 768
45
+ if hasattr(self.audio_encoder, 'cfg'):
46
+ audio_dim = self.audio_encoder.cfg.encoder_embed_dim
47
+
48
+ else:
49
+ self.audio_encoder = CNN14()
50
+ audio_dim = 2048
51
+ if audio_path:
52
+ self.audio_encoder.load_from_pretrain(audio_path)
53
+
54
+ self.audio_proj = nn.Linear(audio_dim, embed_dim)
55
+
56
+ # Decoder
57
+ decoder_layer = nn.TransformerDecoderLayer(
58
+ d_model=embed_dim,
59
+ nhead=num_heads,
60
+ dim_feedforward=dim_feedforward,
61
+ dropout=dropout,
62
+ batch_first=True
63
+ )
64
+ self.decoder = nn.TransformerDecoder(decoder_layer, num_layers=num_decoder_layers)
65
+
66
+ # Embeddings
67
+ self.token_embedding = nn.Embedding(vocab_size, embed_dim)
68
+ self.positional_embedding = nn.Parameter(torch.randn(max_len, embed_dim))
69
+
70
+ self.vocab_size = vocab_size
71
+ self.max_len = max_len
72
+ self.head = nn.Linear(embed_dim, vocab_size, bias=False)
73
+
74
+ # Share weights
75
+ self.head.weight = self.token_embedding.weight
76
+
77
+ if clip_path:
78
+ self.load_visual_weights(clip_path)
79
+
80
+ def load_visual_weights(self, path):
81
+ import os
82
+ if not os.path.exists(path):
83
+ print(f"CLIP model not found at {path}")
84
+ return
85
+
86
+ print(f"Loading CLIP from {path}")
87
+ jit_model = torch.jit.load(path, map_location="cpu")
88
+ state_dict = jit_model.state_dict()
89
+
90
+ visual_sd = {}
91
+ for k, v in state_dict.items():
92
+ if k.startswith("visual."):
93
+ new_key = k[7:] # remove "visual."
94
+ visual_sd[new_key] = v
95
+
96
+ msg = self.visual_encoder.load_state_dict(visual_sd, strict=False)
97
+ print(f"Visual Encoder Loaded: {msg}")
98
+
99
+ def forward(self, images, audios, captions, caption_mask=None):
100
+ """
101
+ images: (B, 3, H, W)
102
+ audios: (B, T_audio, F_mel) or (B, 1, T, F)
103
+ captions: (B, L_cap) indices
104
+ caption_mask: (B, L_cap) boolean or 0/1 mask. True/1 means valid.
105
+ """
106
+
107
+ # 1. Encode Images
108
+ # ModifiedResNet ends with AttentionPool2d, which pools the spatial grid into a
109
+ # single global feature of shape (B, embed_dim). A spatial token sequence would
110
+ # give the decoder more to cross-attend over, but this captioner keeps things
111
+ # simple and uses the pooled visual vector 'v' plus the audio vector 'a' as memory.
125
+
126
+ v_features = self.visual_encoder(images) # (B, D)
127
+ v_features = v_features.unsqueeze(1) # (B, 1, D)
128
+
129
+ # 2. Encode Audio
130
+ a_features = self.audio_encoder(audios) # (B, 2048)
131
+ a_features = self.audio_proj(a_features) # (B, D)
132
+ a_features = a_features.unsqueeze(1) # (B, 1, D)
133
+
134
+ # 3. Concatenate
135
+ memory = torch.cat([v_features, a_features], dim=1) # (B, 2, D)
136
+
137
+ # 4. Decode
138
+ # captions: (B, L)
139
+ # Shift captions for teacher forcing
140
+ # targets usually: captions[:, 1:]
141
+ # inputs: captions[:, :-1]
142
+
143
+ tgt_seq = captions
144
+ tgt_emb = self.token_embedding(tgt_seq) # (B, L, D)
145
+
146
+ # Add position embeddings
147
+ seq_len = tgt_seq.size(1)
148
+ positions = torch.arange(0, seq_len, device=tgt_seq.device).unsqueeze(0)
149
+ tgt_emb = tgt_emb + self.positional_embedding[positions]
150
+
151
+ # Causal Mask
152
+ tgt_mask = self.generate_square_subsequent_mask(seq_len).to(tgt_seq.device)
153
+
154
+ # Padding Mask (for key_padding_mask)
155
+ # caption_mask is 1 for valid, 0 for pad.
156
+ # nn.Transformer expects True for PAD (to ignore).
157
+ # If caption_mask is provided as 1=valid, 0=pad, we invert it.
158
+ key_padding_mask = None
159
+ if caption_mask is not None:
160
+ key_padding_mask = (caption_mask == 0)
161
+
162
+ output = self.decoder(
163
+ tgt=tgt_emb,
164
+ memory=memory,
165
+ tgt_mask=tgt_mask,
166
+ tgt_key_padding_mask=key_padding_mask
167
+ )
168
+
169
+ logits = self.head(output)
170
+ return logits
171
+
172
+ @torch.no_grad()
173
+ def generate(self, images, audios, max_len=20, start_token=49406, end_token=49407):
174
+ """
175
+ Greedy decoding.
176
+ """
177
+ B = images.size(0)
178
+
179
+ # Encode
180
+ v_features = self.visual_encoder(images).unsqueeze(1) # (B, 1, D)
181
+ a_features = self.audio_proj(self.audio_encoder(audios)).unsqueeze(1) # (B, 1, D)
182
+ memory = torch.cat([v_features, a_features], dim=1)
183
+
184
+ # Decode
185
+ # Start token
186
+ tgt = torch.full((B, 1), start_token, dtype=torch.long, device=images.device)
187
+
188
+ finished = torch.zeros(B, dtype=torch.bool, device=images.device)
189
+
190
+ for i in range(max_len):
191
+ tgt_emb = self.token_embedding(tgt)
192
+ # Positional
193
+ seq_len = tgt.size(1)
194
+ positions = torch.arange(0, seq_len, device=images.device).unsqueeze(0)
195
+ tgt_emb = tgt_emb + self.positional_embedding[positions]
196
+
197
+ # Re-forward the full prefix at every step and keep only the last position;
198
+ # the causal mask keeps decoding autoregressive. An efficient implementation
199
+ # would cache past key/values instead of re-running the decoder each step,
200
+ # but this simple greedy loop is correct.
201
+
202
+ tgt_mask = self.generate_square_subsequent_mask(seq_len).to(images.device)
203
+
204
+ output = self.decoder(tgt_emb, memory, tgt_mask=tgt_mask)
205
+ logits = self.head(output[:, -1, :]) # Take last step
206
+
207
+ next_token = logits.argmax(dim=-1, keepdim=True) # (B, 1)
208
+
209
+ tgt = torch.cat([tgt, next_token], dim=1)
210
+
211
+ # Check for end token
212
+ finished |= (next_token.squeeze(-1) == end_token)
213
+ if finished.all():
214
+ break
215
+
216
+ return tgt
217
+ def generate_square_subsequent_mask(self, sz):
+ mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
218
+ mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
219
+ return mask
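+
+
+ if __name__ == "__main__":
+     # Minimal smoke test (a sketch with random weights and hypothetical shapes;
+     # the CNN14 audio branch is used so that no pretrained checkpoint is needed).
+     model = AVCaptioner(audio_enc_type="cnn14")
+     images = torch.randn(2, 3, 224, 224)
+     audios = torch.randn(2, 400, 64)            # (batch, time_steps, mel_bins)
+     captions = torch.randint(0, 49408, (2, 12))
+     logits = model(images, audios, captions)    # (2, 12, vocab_size)
+     print(logits.shape)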
CoCap/cocap/modeling/eval_captioning.py ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 6/17/25
3
+ # @Author : Yaojie Shen
4
+ # @Project : CoCap
5
+ # @File : eval_captioning.py
6
+ import logging
7
+
8
+ from pycocoevalcap.bleu.bleu import Bleu
9
+ from pycocoevalcap.cider.cider import Cider
10
+ from pycocoevalcap.meteor.meteor import Meteor
11
+ from pycocoevalcap.rouge.rouge import Rouge
12
+ from pycocoevalcap.spice.spice import Spice
13
+ from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer
14
+
15
+ logger = logging.getLogger(__name__)
16
+
17
+
18
+ class EvalCap:
19
+ def __init__(self, annos, rests, cls_tokenizer=PTBTokenizer,
20
+ use_scorers=('Bleu', 'METEOR', 'ROUGE_L', 'CIDEr')):
21
+ self.evalImgs = []
22
+ self.eval = {}
23
+ self.imgToEval = {}
24
+ self.annos = annos
25
+ self.rests = rests
26
+ self.Tokenizer = cls_tokenizer
27
+ self.use_scorers = use_scorers
28
+
29
+ def evaluate(self):
30
+ res = {}
31
+ for r in self.rests:
32
+ res[str(r['image_id'])] = [{'caption': r['caption']}]
33
+
34
+ gts = {}
35
+ for imgId in self.annos:
36
+ gts[str(imgId)] = [{'caption': c} for c in self.annos[imgId]]
37
+
38
+ # =================================================
39
+ # Tokenization
40
+ # =================================================
41
+ # print('tokenization...')
42
+ tokenizer = self.Tokenizer()
43
+ gts = tokenizer.tokenize(gts)
44
+ res = tokenizer.tokenize(res)
45
+ # =================================================
46
+ # Set up scorers
47
+ # =================================================
48
+ # print('setting up scorers...')
49
+ use_scorers = self.use_scorers
50
+ scorers = []
51
+ if 'Bleu' in use_scorers:
52
+ scorers.append((Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]))
53
+ if 'METEOR' in use_scorers:
54
+ scorers.append((Meteor(), "METEOR"))
55
+ if 'ROUGE_L' in use_scorers:
56
+ scorers.append((Rouge(), "ROUGE_L"))
57
+ if 'CIDEr' in use_scorers:
58
+ scorers.append((Cider(), "CIDEr"))
59
+ if 'SPICE' in use_scorers:
60
+ scorers.append((Spice(), "SPICE"))
61
+
62
+ # =================================================
63
+ # Compute scores
64
+ # =================================================
65
+ for scorer, method in scorers:
66
+ score, scores = scorer.compute_score(gts, res)
67
+ if type(method) == list:
68
+ for sc, scs, m in zip(score, scores, method):
69
+ self.setEval(sc, m)
70
+ self.setImgToEvalImgs(scs, gts.keys(), m)
71
+ else:
72
+ self.setEval(score, method)
73
+ self.setImgToEvalImgs(scores, gts.keys(), method)
74
+ self.setEvalImgs()
75
+
76
+ def setEval(self, score, method):
77
+ self.eval[method] = score
78
+
79
+ def setImgToEvalImgs(self, scores, imgIds, method):
80
+ for imgId, score in zip(imgIds, scores):
81
+ if not imgId in self.imgToEval:
82
+ self.imgToEval[imgId] = {}
83
+ self.imgToEval[imgId]["image_id"] = imgId
84
+ self.imgToEval[imgId][method] = score
85
+
86
+ def setEvalImgs(self):
87
+ self.evalImgs = [eval for imgId, eval in self.imgToEval.items()]
88
+
89
+
90
+ def evaluate(submission, reference):
91
+ tokenizer = PTBTokenizer # for English
92
+ annos = reference
93
+ data = submission['results']
94
+ rests = []
95
+ for name, value in data.items():
96
+ rests.append({'image_id': str(name), 'caption': value[0]['sentence']})
97
+ eval_cap = EvalCap(annos, rests, tokenizer)
98
+
99
+ eval_cap.evaluate()
100
+
101
+ all_score = {}
102
+ for metric, score in eval_cap.eval.items():
103
+ all_score[metric] = score
104
+ return all_score
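+
+ # Expected input format (sketch with made-up ids/captions):
+ #   submission = {"results": {"1234": [{"sentence": "a man is cooking"}], ...}}
+ #   reference  = {"1234": ["a man cooks food", "someone is cooking"], ...}
+ # The returned dict maps metric names to scores, e.g. Bleu_1..4, METEOR, ROUGE_L, CIDEr.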
CoCap/cocap/modeling/lm_cocap.py ADDED
@@ -0,0 +1,242 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 6/16/25
3
+ # @Author : Yaojie Shen
4
+ # @Project : CoCap
5
+ # @File : lm_cocap.py
6
+ import copy
7
+ import logging
8
+ import os
9
+ from collections import defaultdict
10
+
11
+ import pytorch_lightning as pl
12
+ import torch
13
+ import torch.distributed as dist
14
+ import torch.nn as nn
15
+ from hydra_zen import builds
16
+ from pytorch_lightning.utilities.types import OptimizerLRScheduler
17
+ from torch.optim.lr_scheduler import LambdaLR
18
+
19
+ from cocap.modules.bert import BertLayerNorm
20
+ from cocap.modules.compressed_video import CompressedVideoCaptioner, compressed_video_captioner_pretrained_cfg
21
+ from .eval_captioning import evaluate
22
+ from .loss import LossBase, label_smoothing_loss_cfg
23
+ from .optimization import BertAdam
24
+ from ..utils.json import save_json
25
+ from ..utils.train_utils import gather_object_multiple_gpu, get_timestamp
26
+
27
+ logger = logging.getLogger(__name__)
28
+
29
+
30
+ def convert_ids_to_sentence(tokens):
31
+ from cocap.modules.clip.clip import _tokenizer
32
+ text = _tokenizer.decode(tokens)
33
+ text_list = text.split(" ")
34
+ new = []
35
+ for i in range(len(text_list)):
36
+ if i == 0:
37
+ new.append(text_list[i].split(">")[-1])
38
+ elif "<|endoftext|>" in text_list[i]:
39
+ break
40
+ else:
41
+ new.append(text_list[i])
42
+ return " ".join(new)
43
+
44
+
45
+ class CoCapLM(pl.LightningModule):
46
+ """CoCap Lightning Module"""
47
+
48
+ def __init__(
49
+ self,
50
+ cocap_model: CompressedVideoCaptioner,
51
+ loss: LossBase,
52
+ lr: float = 1e-4,
53
+ clip_lr: float = 1e-6,
54
+ warmup_ratio: float = 0.05,
55
+ lr_decay_gamma: float = 0.95,
56
+ ):
57
+ super().__init__()
58
+ self.model = cocap_model
59
+ self.loss = loss
60
+ self.lr = lr
61
+ self.clip_lr = clip_lr
62
+ self.warmup_ratio = warmup_ratio
63
+ self.lr_decay_gamma = lr_decay_gamma
64
+
65
+ self.batch_res = None
66
+
67
+ @property
68
+ def total_steps(self):
69
+ return self.trainer.estimated_stepping_batches
70
+
71
+ @property
72
+ def epoch_steps(self):
73
+ return self.trainer.estimated_stepping_batches // self.trainer.max_epochs
74
+
75
+ def configure_optimizers(self) -> OptimizerLRScheduler:
76
+ # based on:
77
+ # https://github.com/karpathy/minGPT/blob/3ed14b2cec0dfdad3f4b2831f2b4a86d11aef150/mingpt/model.py#L136
78
+ model = self.model
79
+
80
+ decay = set()
81
+ no_decay = set()
82
+
83
+ pretrained_modules = [
84
+ "compressed_video_transformer.rgb_encoder.conv1",
85
+ "compressed_video_transformer.rgb_encoder.class_embedding",
86
+ "compressed_video_transformer.rgb_encoder.positional_embedding",
87
+ "compressed_video_transformer.rgb_encoder.ln_pre",
88
+ "compressed_video_transformer.rgb_encoder.transformer",
89
+ "compressed_video_transformer.rgb_encoder.ln_post",
90
+ "compressed_video_transformer.rgb_encoder.proj",
91
+ "caption_head.cap_sa_decoder.word_embeddings",
92
+ "caption_head.prediction_head.decoder",
93
+ ]
94
+ whitelist_weight_modules = (nn.Linear, nn.MultiheadAttention, nn.Conv2d)
95
+ blacklist_weight_modules = (nn.LayerNorm, nn.BatchNorm2d, nn.Embedding, BertLayerNorm)
96
+ for mn, m in model.named_modules():
97
+ for pn, p in m.named_parameters():
98
+ fpn = '%s.%s' % (mn, pn) if mn else pn # full param name
99
+
100
+ if any(fpn.startswith(p_fpn) for p_fpn in pretrained_modules): # pretrained
101
+ no_decay.add(fpn)
102
+ elif pn.endswith("bias"):
103
+ no_decay.add(fpn)
104
+ elif pn.endswith("proj") or pn.endswith("projection"):
105
+ decay.add(fpn)
106
+ elif fpn.endswith("embedding"):
107
+ no_decay.add(fpn)
108
+ elif pn.endswith("weight") and isinstance(m, whitelist_weight_modules):
109
+ decay.add(fpn)
110
+ elif pn.endswith("weight") and isinstance(m, blacklist_weight_modules):
111
+ no_decay.add(fpn)
112
+
113
+ param_dict = {pn: p for pn, p in model.named_parameters()}
114
+ inter_params = decay & no_decay
115
+ union_params = decay | no_decay
116
+ assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params),)
117
+ assert len(param_dict.keys() - union_params) == 0, \
118
+ "parameters %s were not separated into either decay/no_decay set!" % (
119
+ str(param_dict.keys() - union_params),)
120
+
121
+ pretrained_no_decay = [pn for pn in sorted(list(no_decay)) if
122
+ any(pn.startswith(p_pn) for p_pn in pretrained_modules)]
123
+ not_pretrained_no_decay = [pn for pn in sorted(list(no_decay)) if
124
+ not any(pn.startswith(p_pn) for p_pn in pretrained_modules)]
125
+
126
+ logger.debug("Parameter group decay_param: %s",
127
+ "\n " + "\n ".join([pn for pn in sorted(list(decay))]))
128
+ logger.debug("Parameter group no_decay_pretrained_param: %s",
129
+ "\n " + "\n ".join([pn for pn in sorted(list(pretrained_no_decay))]))
130
+ logger.debug("Parameter group no_decay_not_pretrained_param: %s",
131
+ "\n " + "\n ".join([pn for pn in sorted(list(not_pretrained_no_decay))]))
132
+
133
+ decay_param = [param_dict[pn] for pn in sorted(list(decay))]
134
+ no_decay_pretrained_param = [param_dict[pn] for pn in sorted(list(pretrained_no_decay))]
135
+ no_decay_not_pretrained_param = [param_dict[pn] for pn in sorted(list(not_pretrained_no_decay))]
136
+
137
+ optimizer_grouped_parameters = [
138
+ {"params": decay_param},
139
+ {"params": no_decay_pretrained_param, "weight_decay": 0.0, "lr": self.clip_lr},
140
+ {"params": no_decay_not_pretrained_param, "weight_decay": 0.0}
141
+ ]
142
+
143
+ optimizer = BertAdam(
144
+ optimizer_grouped_parameters,
145
+ lr=self.lr,
146
+ weight_decay=0.01,
147
+ max_grad_norm=1.0
148
+ )
149
+
150
+ def lr_lambda(current_step):
151
+ warmup_steps = self.warmup_ratio * self.total_steps
152
+ if current_step < warmup_steps:
153
+ return current_step / warmup_steps
154
+ else:
155
+ return self.lr_decay_gamma ** ((current_step - warmup_steps) // self.epoch_steps)
156
+
157
+ # Step-based warmup, epoch-based decay scheduler
158
+ warmup_decay_scheduler = LambdaLR(optimizer, lr_lambda=lr_lambda)
159
+
160
+ return {
161
+ "optimizer": optimizer,
162
+ "lr_scheduler": {
163
+ "scheduler": warmup_decay_scheduler,
164
+ "interval": "step",
165
+ "frequency": 1,
166
+ "name": "warmup_decay"
167
+ }
168
+ }
169
+
170
+ def training_step(self, batch, batch_idx):
171
+ outputs = self.model(batch)
172
+ loss = self.loss(batch, outputs)
173
+ self.log("loss", loss, on_step=True, on_epoch=True, prog_bar=True, logger=True,
174
+ batch_size=batch["input_labels"].size(0))
175
+ return loss
176
+
177
+ def on_validation_epoch_start(self) -> None:
178
+ self.batch_res = {"version": "VERSION 1.0",
179
+ "results": defaultdict(list),
180
+ "external_data": {"used": "true", "details": "ay"}}
181
+
182
+ def validation_step(self, batch, batch_idx):
183
+ inputs_ids = batch["input_ids"]
184
+ input_masks = batch["input_mask"]
185
+ max_t_len = self.model.caption_head.cap_config.max_t_len # hard-code sentence length, for speed test, set it to 21
186
+ inputs_ids[:, :] = 0.
187
+ input_masks[:, :] = 0.
188
+ assert torch.sum(input_masks[:, :]) == 0, "Initially, all text tokens should be masked"
189
+ bsz = len(inputs_ids)
190
+ next_symbols = torch.IntTensor([self.model.caption_head.cap_config.BOS_id] * bsz) # (N, )
191
+
192
+ warn_visual_output = False
193
+ for dec_idx in range(max_t_len):
194
+ inputs_ids[:, dec_idx] = next_symbols.clone()
195
+ input_masks[:, dec_idx] = 1
196
+ outputs = self.model(batch)
197
+ pred_scores = outputs["prediction_scores"]
198
+ next_words = pred_scores[:, dec_idx].max(1)[1]
199
+ next_symbols = next_words.cpu()
200
+ if "visual_output" in outputs:
201
+ batch["visual_output"] = outputs["visual_output"]
202
+ elif not warn_visual_output:
203
+ logger.warning("visual_output is not in the output of model, this may slow down the caption test")
204
+ warn_visual_output = True
205
+ dec_seq = inputs_ids
206
+
207
+ for example_idx, (cur_gen_sen, cur_meta) in enumerate(zip(dec_seq, batch['metadata'][1])):
208
+ cur_data = {
209
+ "sentence": convert_ids_to_sentence(cur_gen_sen.tolist()),
210
+ "gt_sentence": cur_meta
211
+ }
212
+ self.batch_res["results"][batch['metadata'][0][example_idx].split("video")[-1]].append(cur_data)
213
+
214
+ def on_validation_epoch_end(self) -> None:
215
+ json_res = copy.deepcopy(self.batch_res)
216
+ if dist.is_initialized():
217
+ all_results = gather_object_multiple_gpu(list(json_res["results"].items()))
218
+ json_res['results'] = {k: v for k, v in all_results}
219
+ logger.debug("Caption test length: %s", len(json_res["results"].items()))
220
+
221
+ # save result to log for debugging
222
+ if not dist.is_initialized() or dist.get_rank() == 0:
223
+ res_filepath = os.path.join(self.trainer.default_root_dir,
224
+ "caption_greedy_pred_validation_{}.json".format(get_timestamp()))
225
+ os.makedirs(os.path.dirname(res_filepath), exist_ok=True)
226
+ save_json(json_res, res_filepath, save_pretty=True)
227
+
228
+ if not dist.is_initialized() or dist.get_rank() == 0:
229
+ json_ref = self.trainer.val_dataloaders.dataset.json_ref
230
+ metrics = evaluate(json_res, json_ref)
231
+ self.log_dict(metrics, on_step=False, on_epoch=True, logger=True)
232
+
233
+ if dist.is_initialized():
234
+ dist.barrier()
235
+
236
+
237
+ cocap_lm_cfg = builds(
238
+ CoCapLM,
239
+ cocap_model=compressed_video_captioner_pretrained_cfg,
240
+ loss=label_smoothing_loss_cfg,
241
+ populate_full_signature=True
242
+ )
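+
+ # Usage sketch (assuming the Hydra/hydra-zen wiring used elsewhere in this project):
+ #   from hydra_zen import instantiate
+ #   lm = instantiate(cocap_lm_cfg)   # CoCapLM with the pretrained captioner cfg and label-smoothing loss
+ #   trainer.fit(lm, datamodule=...)  # standard PyTorch Lightning loop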
CoCap/cocap/modeling/loss.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 7/19/23
3
+ # @Author : Yaojie Shen
4
+ # @Project : CoCap
5
+ # @File : loss.py
6
+
7
+ __all__ = [
8
+ "LossBase",
9
+ "LabelSmoothingLoss",
10
+ "label_smoothing_loss_cfg"
11
+ ]
12
+
13
+ import logging
14
+ from abc import abstractmethod
15
+
16
+ import torch
17
+ import torch.nn.functional as F
18
+ from hydra_zen import builds
19
+ from torch import Tensor, nn
20
+
21
+ logger = logging.getLogger(__name__)
22
+
23
+
24
+ class LossBase(nn.Module):
25
+ @abstractmethod
26
+ def forward(self, inputs, outputs) -> Tensor:
27
+ """Compute loss."""
28
+
29
+
30
+ class LabelSmoothingLoss(LossBase):
31
+ def __init__(self, label_smoothing=0.1, target_vocab_size=49408, ignore_index=0):
32
+ assert 0.0 < label_smoothing <= 1.0
33
+
34
+ super().__init__()
35
+
36
+ self.tgt_vocab_size = target_vocab_size
37
+ self.ignore_index = ignore_index
38
+
39
+ self.log_softmax = nn.LogSoftmax(dim=-1)
40
+
41
+ smoothing_value = label_smoothing / (self.tgt_vocab_size - 1) # count for the ground-truth word
42
+ one_hot = torch.full((self.tgt_vocab_size,), smoothing_value)
43
+ # one_hot[self.ignore_index] = 0
44
+ self.register_buffer("one_hot", one_hot.unsqueeze(0))
45
+
46
+ self.confidence = 1.0 - label_smoothing
47
+
48
+ def forward(self, target, output):
49
+ output = output["prediction_scores"]
50
+ output = output.view(-1, self.tgt_vocab_size)
51
+ target = target['input_labels'].reshape(-1).long()
52
+ valid_indices = target != self.ignore_index # drop positions whose target equals ignore_index (padding)
53
+ target = target[valid_indices]
54
+ output = self.log_softmax(output[valid_indices])
55
+
56
+ model_prob = self.one_hot.repeat(target.size(0), 1).to(target.device)
57
+ model_prob.scatter_(1, target.unsqueeze(1), self.confidence)
58
+ return F.kl_div(output, model_prob, reduction="sum")
59
+
60
+
61
+ # Build configs for organizing modules with hydra
62
+ label_smoothing_loss_cfg = builds(LabelSmoothingLoss, populate_full_signature=True)
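+
+ # Illustrative numbers: with label_smoothing=0.1 and target_vocab_size=49408, every
+ # non-target word receives probability 0.1 / 49407 (about 2e-6) and the ground-truth
+ # word receives confidence 0.9; the loss is the sum-reduced KL divergence between this
+ # smoothed distribution and the model's log-softmax output.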
CoCap/cocap/modeling/optimization.py ADDED
@@ -0,0 +1,185 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """PyTorch optimization for BERT model."""
17
+
18
+ __all__ = [
19
+ "BertAdam",
20
+ "bert_adam_cfg",
21
+ ]
22
+
23
+ import logging
24
+
25
+ import math
26
+ import torch
27
+ from hydra_zen import builds
28
+ from torch.nn.utils import clip_grad_norm_
29
+ from torch.optim import Optimizer
30
+ from torch.optim.optimizer import required
31
+
32
+ logger = logging.getLogger(__name__)
33
+
34
+
35
+ def warmup_cosine(x, warmup=0.002):
36
+ if x < warmup:
37
+ return x / warmup
38
+ return 0.5 * (1.0 + math.cos(math.pi * x))
39
+
40
+
41
+ def warmup_constant(x, warmup=0.002):
42
+ """ Linearly increases learning rate over `warmup`*`t_total` (as provided to BertAdam) training steps.
43
+ Learning rate is 1. afterwards. """
44
+ if x < warmup:
45
+ return x / warmup
46
+ return 1.0
47
+
48
+
49
+ def warmup_linear(x, warmup=0.002):
50
+ """ Specifies a triangular learning rate schedule where peak is reached at `warmup`*`t_total`-th (as provided to BertAdam) training step.
51
+ After `t_total`-th training step, learning rate is zero. """
52
+ if x < warmup:
53
+ return x / warmup
54
+ return max((x - 1.) / (warmup - 1.), 0)
55
+
56
+
57
+ SCHEDULES = {
58
+ 'warmup_cosine': warmup_cosine,
59
+ 'warmup_constant': warmup_constant,
60
+ 'warmup_linear': warmup_linear,
61
+ }
62
+
63
+
64
+ class BertAdam(Optimizer):
65
+ """Implements BERT version of Adam algorithm with weight decay fix.
66
+ Params:
67
+ lr: learning rate
68
+ warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
69
+ t_total: total number of training steps for the learning
70
+ rate schedule, -1 means constant learning rate. Default: -1
71
+ schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'
72
+ b1: Adams b1. Default: 0.9
73
+ b2: Adams b2. Default: 0.999
74
+ e: Adams epsilon. Default: 1e-6
75
+ weight_decay: Weight decay. Default: 0.01
76
+ max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
77
+ """
78
+
79
+ def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear',
80
+ b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01,
81
+ max_grad_norm=1.0):
82
+ if lr is not required and lr < 0.0:
83
+ raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
84
+ if schedule not in SCHEDULES:
85
+ raise ValueError("Invalid schedule parameter: {}".format(schedule))
86
+ if not 0.0 <= warmup < 1.0 and not warmup == -1:
87
+ raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
88
+ if not 0.0 <= b1 < 1.0:
89
+ raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
90
+ if not 0.0 <= b2 < 1.0:
91
+ raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
92
+ if not e >= 0.0:
93
+ raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
94
+ defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
95
+ b1=b1, b2=b2, e=e, weight_decay=weight_decay,
96
+ max_grad_norm=max_grad_norm)
97
+ super(BertAdam, self).__init__(params, defaults)
98
+
99
+ def get_lr(self):
100
+ lr = []
101
+ for group in self.param_groups:
102
+ for p in group['params']:
103
+ if p.grad is None:
104
+ continue
105
+ state = self.state[p]
106
+ if len(state) == 0:
107
+ return [0]
108
+ if group['t_total'] != -1:
109
+ schedule_fct = SCHEDULES[group['schedule']]
110
+ lr_scheduled = group['lr'] * schedule_fct(state['step'] / group['t_total'], group['warmup'])
111
+ else:
112
+ lr_scheduled = group['lr']
113
+ lr.append(lr_scheduled)
114
+ return lr
115
+
116
+ def step(self, closure=None):
117
+ """Performs a single optimization step.
118
+ Arguments:
119
+ closure (callable, optional): A closure that reevaluates the model
120
+ and returns the loss.
121
+ """
122
+ loss = None
123
+ if closure is not None:
124
+ loss = closure()
125
+
126
+ for group in self.param_groups:
127
+ for p in group['params']:
128
+ if p.grad is None:
129
+ continue
130
+ grad = p.grad.data
131
+ if grad.is_sparse:
132
+ raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
133
+
134
+ state = self.state[p]
135
+
136
+ # State initialization
137
+ if len(state) == 0:
138
+ state['step'] = 0
139
+ # Exponential moving average of gradient values
140
+ state['next_m'] = torch.zeros_like(p.data)
141
+ # Exponential moving average of squared gradient values
142
+ state['next_v'] = torch.zeros_like(p.data)
143
+
144
+ next_m, next_v = state['next_m'], state['next_v']
145
+ beta1, beta2 = group['b1'], group['b2']
146
+
147
+ # Add grad clipping
148
+ if group['max_grad_norm'] > 0:
149
+ clip_grad_norm_(p, group['max_grad_norm'])
150
+
151
+ # Decay the first and second moment running average coefficient
152
+ # In-place operations to update the averages at the same time
153
+ # next_m.mul_(beta1).add_(1 - beta1, grad) --> pytorch 1.7
154
+ next_m.mul_(beta1).add_(grad, alpha=1 - beta1)
155
+ # next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad) --> pytorch 1.7
156
+ next_v.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
157
+ update = next_m / (next_v.sqrt() + group['e'])
158
+
159
+ # Just adding the square of the weights to the loss function is *not*
160
+ # the correct way of using L2 regularization/weight decay with Adam,
161
+ # since that will interact with the m and v parameters in strange ways.
162
+ #
163
+ # Instead we want to decay the weights in a manner that doesn't interact
164
+ # with the m/v parameters. This is equivalent to adding the square
165
+ # of the weights to the loss with plain (non-momentum) SGD.
166
+ if group['weight_decay'] > 0.0:
167
+ update += group['weight_decay'] * p.data
168
+
169
+ if group['t_total'] != -1:
170
+ schedule_fct = SCHEDULES[group['schedule']]
171
+ progress = state['step'] / group['t_total']
172
+ lr_scheduled = group['lr'] * schedule_fct(progress, group['warmup'])
173
+ else:
174
+ lr_scheduled = group['lr']
175
+
176
+ update_with_lr = lr_scheduled * update
177
+ p.data.add_(-update_with_lr)
178
+
179
+ state['step'] += 1
180
+
181
+ return loss
182
+
183
+
184
+ # Build configs for organizing modules with hydra
185
+ bert_adam_cfg = builds(BertAdam, populate_full_signature=True)
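+
+ # Usage sketch (hypothetical hyper-parameters, not taken from the training config):
+ #   optimizer = BertAdam(model.parameters(), lr=1e-4, warmup=0.1, t_total=10000,
+ #                        schedule="warmup_linear", weight_decay=0.01, max_grad_norm=1.0)
+ #   loss.backward(); optimizer.step(); optimizer.zero_grad()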
CoCap/cocap/modules/README.md ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # layers
2
+
CoCap/cocap/modules/__init__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 5/30/23
3
+ # @Author : Yaojie Shen
4
+ # @Project : CoCap
5
+ # @File : __init__.py
CoCap/cocap/modules/audio_encoder.py ADDED
@@ -0,0 +1,194 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+ import os
5
+ from cocap.modules.beats import BEATs, BEATsConfig
6
+
7
+ class BEATsAudioEncoder(nn.Module):
8
+ def __init__(self, model_path=None, model_cfg=None):
9
+ super().__init__()
10
+ # Build the config from the checkpoint when one is provided (BEATs checkpoints
11
+ # store their own cfg dict); otherwise fall back to the BEATsConfig defaults
12
+ # (Base, 768-dim encoder). An explicit model_cfg can still override fields.
13
+
14
+ # Load config logic
15
+ checkpoint = torch.load(model_path, map_location='cpu') if model_path else None
16
+ cfg_dict = checkpoint['cfg'] if checkpoint and 'cfg' in checkpoint else None
17
+
18
+ # Create Config
19
+ self.cfg = BEATsConfig(cfg_dict)
20
+ if model_cfg:
21
+ self.cfg.update(model_cfg)
22
+
23
+ self.model = BEATs(self.cfg)
24
+
25
+ if checkpoint:
26
+ if 'model' in checkpoint:
27
+ self.model.load_state_dict(checkpoint['model'], strict=False)
28
+ else:
29
+ self.model.load_state_dict(checkpoint, strict=False)
30
+ print(f"BEATs loaded from {model_path}")
31
+
32
+ def forward(self, input, padding_mask=None):
33
+ """
34
+ Input: (B, T, F) mel-filterbank features, already normalized in the dataset.
35
+ BEATs.extract_features is called with is_fbank=True, so the fbank is used
36
+ directly: it is unsqueezed to (B, 1, T, F) and passed through the Conv2d
37
+ patch embedding, matching the standard BEATs preprocessing path.
38
+ """
39
+ # padding_mask follows the BEATs convention: shape (B, T) with True for
40
+ # positions to ignore. The dataset pads spectrograms with zeros, and since 0 is
41
+ # a valid normalized mel value we do not infer a mask from zeros; callers should
42
+ # pass an explicit padding_mask (None means all frames are treated as valid).
64
+
65
+ x, _ = self.model.extract_features(input, padding_mask=padding_mask, is_fbank=True)
66
+ # x: (B, T_out, 768)
67
+
68
+ # Global pooling: BEATs returns a token sequence, so mean-pool over time
69
+ # to obtain a single global audio embedding.
70
+ x = x.mean(dim=1)
71
+ return x
72
+
73
+ class CNN14(nn.Module):
74
+ def __init__(self, sample_rate=32000, window_size=1024, hop_size=320, mel_bins=64, fmin=50, fmax=14000, classes_num=527):
75
+ super(CNN14, self).__init__()
76
+
77
+ # No waveform front-end (e.g. a torchaudio log-mel extractor) is included here:
78
+ # this implementation assumes the input is already a (log-)mel spectrogram.
81
+
82
+ self.bn0 = nn.BatchNorm2d(64)
83
+
84
+ self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
85
+ self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
86
+ self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
87
+ self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
88
+ self.conv_block5 = ConvBlock(in_channels=512, out_channels=1024)
89
+ self.conv_block6 = ConvBlock(in_channels=1024, out_channels=2048)
90
+
91
+ self.fc1 = nn.Linear(2048, 2048, bias=True)
92
+ self.fc_audioset = nn.Linear(2048, classes_num, bias=True)
93
+
94
+ def forward(self, input, mixup_lambda=None):
95
+ """
96
+ Input: (batch_size, 1, time_steps, mel_bins) or (batch_size, time_steps, mel_bins)
97
+ """
98
+ if input.dim() == 3:
99
+ input = input.unsqueeze(1) # Add channel dim
100
+
101
+ # The conv blocks below operate on (Batch, 1, Freq, Time): after this permute,
102
+ # dim 2 is frequency and dim 3 is time, which matches the pooling at the end
103
+ # of this method (mean over time at dim 3, then max/mean over frequency at dim 2).
104
+ x = input.permute(0, 1, 3, 2)
109
+
110
+ x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
111
+ x = torch.dropout(x, p=0.2, train=self.training)
112
+
113
+ x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg')
114
+ x = torch.dropout(x, p=0.2, train=self.training)
115
+
116
+ x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg')
117
+ x = torch.dropout(x, p=0.2, train=self.training)
118
+
119
+ x = self.conv_block4(x, pool_size=(2, 2), pool_type='avg')
120
+ x = torch.dropout(x, p=0.2, train=self.training)
121
+
122
+ x = self.conv_block5(x, pool_size=(2, 2), pool_type='avg')
123
+ x = torch.dropout(x, p=0.2, train=self.training)
124
+
125
+ x = self.conv_block6(x, pool_size=(1, 1), pool_type='avg')
126
+ x = torch.dropout(x, p=0.2, train=self.training)
127
+
128
+ # Global pooling
129
+ x = torch.mean(x, dim=3) # Average over time
130
+ (x1, _) = torch.max(x, dim=2) # Max over freq
131
+ x2 = torch.mean(x, dim=2) # Mean over freq
132
+
133
+ x = x1 + x2 # Sum global pooling
134
+
135
+ x = F.relu_(self.fc1(x))
136
+ # embedding = torch.dropout(x, p=0.5, train=self.training)
137
+ # clip_output = torch.sigmoid(self.fc_audioset(x))
138
+
139
+ # we return the global embedding
140
+ return x
141
+
142
+ def load_from_pretrain(self, path):
143
+ if not os.path.exists(path):
144
+ print(f"Pretrained model not found at {path}")
145
+ return
146
+
147
+ print(f"Loading Audio Encoder from {path}")
148
+ checkpoint = torch.load(path, map_location='cpu')
149
+
150
+ if 'model' in checkpoint:
151
+ state_dict = checkpoint['model']
152
+ else:
153
+ state_dict = checkpoint
154
+
155
+ # PANNs checkpoint might have 'module.' prefix if trained with DataParallel
156
+ new_state_dict = {}
157
+ for k, v in state_dict.items():
158
+ if k.startswith('module.'):
159
+ new_state_dict[k[7:]] = v
160
+ else:
161
+ new_state_dict[k] = v
162
+
163
+ # Filter out fc_audioset if sizes don't match (we defined classes_num=527 which is standard, but just in case)
164
+ # Also FC1 might differ if not 2048.
165
+
166
+ msg = self.load_state_dict(new_state_dict, strict=False)
167
+ print(f"Audio Encoder Loaded: {msg}")
168
+
169
+
170
+ class ConvBlock(nn.Module):
171
+ def __init__(self, in_channels, out_channels):
172
+ super(ConvBlock, self).__init__()
173
+ self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
174
+ kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
175
+ self.conv2 = nn.Conv2d(in_channels=out_channels, out_channels=out_channels,
176
+ kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
177
+ self.bn1 = nn.BatchNorm2d(out_channels)
178
+ self.bn2 = nn.BatchNorm2d(out_channels)
179
+
180
+ def forward(self, input, pool_size=(2, 2), pool_type='avg'):
181
+ x = input
182
+ x = F.relu_(self.bn1(self.conv1(x)))
183
+ x = F.relu_(self.bn2(self.conv2(x)))
184
+ if pool_type == 'max':
185
+ x = F.max_pool2d(x, kernel_size=pool_size)
186
+ elif pool_type == 'avg':
187
+ x = F.avg_pool2d(x, kernel_size=pool_size)
188
+ elif pool_type == 'avg+max':
189
+ x1 = F.avg_pool2d(x, kernel_size=pool_size)
190
+ x2 = F.max_pool2d(x, kernel_size=pool_size)
191
+ x = x1 + x2
192
+ else:
193
+ raise Exception('Incorrect argument!')
194
+ return x
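+
+
+ if __name__ == "__main__":
+     # Minimal smoke test (sketch): random mel-spectrogram input, random weights.
+     encoder = CNN14()
+     spec = torch.randn(2, 400, 64)   # (batch, time_steps, mel_bins)
+     print(encoder(spec).shape)       # expected: torch.Size([2, 2048])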
CoCap/cocap/modules/beats/BEATs.py ADDED
@@ -0,0 +1,183 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # --------------------------------------------------------
2
+ # BEATs: Audio Pre-Training with Acoustic Tokenizers (https://arxiv.org/abs/2212.09058)
3
+ # Github source: https://github.com/microsoft/unilm/tree/master/beats
4
+ # Copyright (c) 2022 Microsoft
5
+ # Licensed under The MIT License [see LICENSE for details]
6
+ # Based on fairseq code bases
7
+ # https://github.com/pytorch/fairseq
8
+ # --------------------------------------------------------
9
+
10
+
11
+ import torch
12
+ import torch.nn as nn
13
+ from torch.nn import LayerNorm
14
+ import torchaudio.compliance.kaldi as ta_kaldi
15
+
16
+ from .backbone import (
17
+ TransformerEncoder,
18
+ )
19
+
20
+ import logging
21
+ from typing import Optional
22
+
23
+ logger = logging.getLogger(__name__)
24
+
25
+
26
+ class BEATsConfig:
27
+ def __init__(self, cfg=None):
28
+ self.input_patch_size: int = -1 # patch size of patch embedding
29
+ self.embed_dim: int = 512 # patch embedding dimension
30
+ self.conv_bias: bool = False # include bias in conv encoder
31
+
32
+ self.encoder_layers: int = 12 # num encoder layers in the transformer
33
+ self.encoder_embed_dim: int = 768 # encoder embedding dimension
34
+ self.encoder_ffn_embed_dim: int = 3072 # encoder embedding dimension for FFN
35
+ self.encoder_attention_heads: int = 12 # num encoder attention heads
36
+ self.activation_fn: str = "gelu" # activation function to use
37
+
38
+ self.layer_wise_gradient_decay_ratio: float = 1.0 # ratio for layer-wise gradient decay
39
+ self.layer_norm_first: bool = False # apply layernorm first in the transformer
40
+ self.deep_norm: bool = False # apply deep_norm first in the transformer
41
+
42
+ # dropouts
43
+ self.dropout: float = 0.1 # dropout probability for the transformer
44
+ self.attention_dropout: float = 0.1 # dropout probability for attention weights
45
+ self.activation_dropout: float = 0.0 # dropout probability after activation in FFN
46
+ self.encoder_layerdrop: float = 0.0 # probability of dropping a transformer layer
47
+ self.dropout_input: float = 0.0 # dropout to apply to the input (after feat extr)
48
+
49
+ # positional embeddings
50
+ self.conv_pos: int = 128 # number of filters for convolutional positional embeddings
51
+ self.conv_pos_groups: int = 16 # number of groups for convolutional positional embedding
52
+
53
+ # relative position embedding
54
+ self.relative_position_embedding: bool = False # apply relative position embedding
55
+ self.num_buckets: int = 320 # number of buckets for relative position embedding
56
+ self.max_distance: int = 1280 # maximum distance for relative position embedding
57
+ self.gru_rel_pos: bool = False # apply gated relative position embedding
58
+
59
+ # label predictor
60
+ self.finetuned_model: bool = False # whether the model is a fine-tuned model.
61
+ self.predictor_dropout: float = 0.1 # dropout probability for the predictor
62
+ self.predictor_class: int = 527 # target class number for the predictor
63
+
64
+ if cfg is not None:
65
+ self.update(cfg)
66
+
67
+ def update(self, cfg: dict):
68
+ self.__dict__.update(cfg)
69
+
70
+
71
+ class BEATs(nn.Module):
72
+ def __init__(
73
+ self,
74
+ cfg: BEATsConfig,
75
+ ) -> None:
76
+ super().__init__()
77
+ logger.info(f"BEATs Config: {cfg.__dict__}")
78
+
79
+ self.cfg = cfg
80
+
81
+ self.embed = cfg.embed_dim
82
+ self.post_extract_proj = (
83
+ nn.Linear(self.embed, cfg.encoder_embed_dim)
84
+ if self.embed != cfg.encoder_embed_dim
85
+ else None
86
+ )
87
+
88
+ self.input_patch_size = cfg.input_patch_size
89
+ self.patch_embedding = nn.Conv2d(1, self.embed, kernel_size=self.input_patch_size, stride=self.input_patch_size,
90
+ bias=cfg.conv_bias)
91
+
92
+ self.dropout_input = nn.Dropout(cfg.dropout_input)
93
+
94
+ assert not cfg.deep_norm or not cfg.layer_norm_first
95
+ self.encoder = TransformerEncoder(cfg)
96
+ self.layer_norm = LayerNorm(self.embed)
97
+
98
+ if cfg.finetuned_model:
99
+ self.predictor_dropout = nn.Dropout(cfg.predictor_dropout)
100
+ self.predictor = nn.Linear(cfg.encoder_embed_dim, cfg.predictor_class)
101
+ else:
102
+ self.predictor = None
103
+
104
+ def forward_padding_mask(
105
+ self,
106
+ features: torch.Tensor,
107
+ padding_mask: torch.Tensor,
108
+ ) -> torch.Tensor:
109
+ extra = padding_mask.size(1) % features.size(1)
110
+ if extra > 0:
111
+ padding_mask = padding_mask[:, :-extra]
112
+ padding_mask = padding_mask.view(
113
+ padding_mask.size(0), features.size(1), -1
114
+ )
115
+ padding_mask = padding_mask.all(-1)
116
+ return padding_mask
117
+
118
+ def preprocess(
119
+ self,
120
+ source: torch.Tensor,
121
+ fbank_mean: float = 15.41663,
122
+ fbank_std: float = 6.55582,
123
+ ) -> torch.Tensor:
124
+ fbanks = []
125
+ for waveform in source:
126
+ waveform = waveform.unsqueeze(0) * 2 ** 15
127
+ fbank = ta_kaldi.fbank(waveform, num_mel_bins=128, sample_frequency=16000, frame_length=25, frame_shift=10)
128
+ fbanks.append(fbank)
129
+ fbank = torch.stack(fbanks, dim=0)
130
+ fbank = (fbank - fbank_mean) / (2 * fbank_std)
131
+ return fbank
132
+
133
+ def extract_features(
134
+ self,
135
+ source: torch.Tensor,
136
+ padding_mask: Optional[torch.Tensor] = None,
137
+ fbank_mean: float = 15.41663,
138
+ fbank_std: float = 6.55582,
139
+ is_fbank: bool = False,
140
+ ):
141
+ if is_fbank:
142
+ fbank = source
143
+ else:
144
+ fbank = self.preprocess(source, fbank_mean=fbank_mean, fbank_std=fbank_std)
145
+
146
+ if padding_mask is not None:
147
+ padding_mask = self.forward_padding_mask(fbank, padding_mask)
148
+
149
+ fbank = fbank.unsqueeze(1)
150
+ features = self.patch_embedding(fbank)
151
+ features = features.reshape(features.shape[0], features.shape[1], -1)
152
+ features = features.transpose(1, 2)
153
+ features = self.layer_norm(features)
154
+
155
+ if padding_mask is not None:
156
+ padding_mask = self.forward_padding_mask(features, padding_mask)
157
+
158
+ if self.post_extract_proj is not None:
159
+ features = self.post_extract_proj(features)
160
+
161
+ x = self.dropout_input(features)
162
+
163
+ x, layer_results = self.encoder(
164
+ x,
165
+ padding_mask=padding_mask,
166
+ )
167
+
168
+ if self.predictor is not None:
169
+ x = self.predictor_dropout(x)
170
+ logits = self.predictor(x)
171
+
172
+ if padding_mask is not None and padding_mask.any():
173
+ logits[padding_mask] = 0
174
+ logits = logits.sum(dim=1)
175
+ logits = logits / (~padding_mask).sum(dim=1).unsqueeze(-1).expand_as(logits)
176
+ else:
177
+ logits = logits.mean(dim=1)
178
+
179
+ lprobs = torch.sigmoid(logits)
180
+
181
+ return lprobs, padding_mask
182
+ else:
183
+ return x, padding_mask
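+
+ # Usage sketch: for raw 16 kHz waveforms, preprocess() converts them to 128-bin
+ # kaldi fbank features and normalizes them; extract_features() then returns
+ # (features, padding_mask), where features has shape (B, T', encoder_embed_dim)
+ # for a plain encoder, or per-clip class probabilities for a fine-tuned model.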
CoCap/cocap/modules/beats/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .BEATs import BEATs, BEATsConfig
CoCap/cocap/modules/beats/backbone.py ADDED
@@ -0,0 +1,783 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # --------------------------------------------------------
2
+ # BEATs: Audio Pre-Training with Acoustic Tokenizers (https://arxiv.org/abs/2212.09058)
3
+ # Github source: https://github.com/microsoft/unilm/tree/master/beats
4
+ # Copyright (c) 2022 Microsoft
5
+ # Licensed under The MIT License [see LICENSE for details]
6
+ # Based on fairseq code bases
7
+ # https://github.com/pytorch/fairseq
8
+ # --------------------------------------------------------
9
+
10
+ import math
11
+ import numpy as np
12
+ from typing import Dict, Optional, Tuple
13
+ import torch
14
+ from torch import Tensor, nn
15
+ import torch.nn.functional as F
16
+ from torch.nn import LayerNorm, Parameter
17
+ from .modules import (
18
+ GradMultiply,
19
+ SamePad,
20
+ get_activation_fn,
21
+ GLU_Linear,
22
+ quant_noise,
23
+ )
24
+
25
+
26
+ class TransformerEncoder(nn.Module):
27
+ def __init__(self, args):
28
+ super().__init__()
29
+
30
+ self.dropout = args.dropout
31
+ self.embedding_dim = args.encoder_embed_dim
32
+
33
+ self.pos_conv = nn.Conv1d(
34
+ self.embedding_dim,
35
+ self.embedding_dim,
36
+ kernel_size=args.conv_pos,
37
+ padding=args.conv_pos // 2,
38
+ groups=args.conv_pos_groups,
39
+ )
40
+ dropout = 0
41
+ std = math.sqrt((4 * (1.0 - dropout)) / (args.conv_pos * self.embedding_dim))
42
+ nn.init.normal_(self.pos_conv.weight, mean=0, std=std)
43
+ nn.init.constant_(self.pos_conv.bias, 0)
44
+
45
+ self.pos_conv = nn.utils.weight_norm(self.pos_conv, name="weight", dim=2)
46
+ self.pos_conv = nn.Sequential(self.pos_conv, SamePad(args.conv_pos), nn.GELU())
47
+
48
+ if hasattr(args, "relative_position_embedding"):
49
+ self.relative_position_embedding = args.relative_position_embedding
50
+ self.num_buckets = args.num_buckets
51
+ self.max_distance = args.max_distance
52
+ else:
53
+ self.relative_position_embedding = False
54
+ self.num_buckets = 0
55
+ self.max_distance = 0
56
+
57
+ self.layers = nn.ModuleList(
58
+ [
59
+ TransformerSentenceEncoderLayer(
60
+ embedding_dim=self.embedding_dim,
61
+ ffn_embedding_dim=args.encoder_ffn_embed_dim,
62
+ num_attention_heads=args.encoder_attention_heads,
63
+ dropout=self.dropout,
64
+ attention_dropout=args.attention_dropout,
65
+ activation_dropout=args.activation_dropout,
66
+ activation_fn=args.activation_fn,
67
+ layer_norm_first=args.layer_norm_first,
68
+ deep_norm=args.deep_norm,
69
+ has_relative_attention_bias=self.relative_position_embedding,
70
+ num_buckets=self.num_buckets,
71
+ max_distance=self.max_distance,
72
+ gru_rel_pos=args.gru_rel_pos,
73
+ encoder_layers=args.encoder_layers,
74
+ )
75
+ for i in range(args.encoder_layers)
76
+ ]
77
+ )
78
+ if self.relative_position_embedding:
79
+ for i in range(1, args.encoder_layers):
80
+ del self.layers[i].self_attn.relative_attention_bias
81
+ self.layers[i].self_attn.relative_attention_bias = self.layers[0].self_attn.relative_attention_bias
82
+
83
+ self.layer_norm_first = args.layer_norm_first
84
+ self.layer_norm = LayerNorm(self.embedding_dim)
85
+ self.layerdrop = args.encoder_layerdrop
86
+
87
+ self.apply(init_bert_params)
88
+
89
+ if args.deep_norm:
90
+ deep_norm_beta = math.pow(8 * args.encoder_layers, -1 / 4)
91
+ for i in range(args.encoder_layers):
92
+ nn.init.xavier_normal_(self.layers[i].self_attn.k_proj.weight, gain=1)
93
+ nn.init.xavier_normal_(self.layers[i].self_attn.v_proj.weight, gain=deep_norm_beta)
94
+ nn.init.xavier_normal_(self.layers[i].self_attn.q_proj.weight, gain=1)
95
+ nn.init.xavier_normal_(self.layers[i].self_attn.out_proj.weight, gain=deep_norm_beta)
96
+ nn.init.xavier_normal_(self.layers[i].fc1.weight, gain=deep_norm_beta)
97
+ nn.init.xavier_normal_(self.layers[i].fc2.weight, gain=deep_norm_beta)
98
+
99
+ self.layer_wise_gradient_decay_ratio = getattr(args, "layer_wise_gradient_decay_ratio", 1)
100
+
101
+ def forward(self, x, padding_mask=None, layer=None):
102
+ x, layer_results = self.extract_features(x, padding_mask, layer)
103
+
104
+ if self.layer_norm_first and layer is None:
105
+ x = self.layer_norm(x)
106
+
107
+ return x, layer_results
108
+
109
+ def extract_features(self, x, padding_mask=None, tgt_layer=None):
110
+
111
+ if padding_mask is not None:
112
+ x[padding_mask] = 0
113
+
114
+ x_conv = self.pos_conv(x.transpose(1, 2))
115
+ x_conv = x_conv.transpose(1, 2)
116
+ x = x + x_conv
117
+
118
+ if not self.layer_norm_first:
119
+ x = self.layer_norm(x)
120
+
121
+ x = F.dropout(x, p=self.dropout, training=self.training)
122
+
123
+ # B x T x C -> T x B x C
124
+ x = x.transpose(0, 1)
125
+
126
+ layer_results = []
127
+ z = None
128
+ if tgt_layer is not None:
129
+ layer_results.append((x, z))
130
+ r = None
131
+ pos_bias = None
132
+ for i, layer in enumerate(self.layers):
133
+ if self.layer_wise_gradient_decay_ratio != 1.0:
134
+ x = GradMultiply.apply(x, self.layer_wise_gradient_decay_ratio)
135
+ dropout_probability = np.random.random()
136
+ if not self.training or (dropout_probability > self.layerdrop):
137
+ x, z, pos_bias = layer(x, self_attn_padding_mask=padding_mask, need_weights=False, pos_bias=pos_bias)
138
+ if tgt_layer is not None:
139
+ layer_results.append((x, z))
140
+ if i == tgt_layer:
141
+ r = x
142
+ break
143
+
144
+ if r is not None:
145
+ x = r
146
+
147
+ # T x B x C -> B x T x C
148
+ x = x.transpose(0, 1)
149
+
150
+ return x, layer_results
151
+
152
+
153
+ class TransformerSentenceEncoderLayer(nn.Module):
154
+ def __init__(
155
+ self,
156
+ embedding_dim: float = 768,
157
+ ffn_embedding_dim: float = 3072,
158
+ num_attention_heads: float = 8,
159
+ dropout: float = 0.1,
160
+ attention_dropout: float = 0.1,
161
+ activation_dropout: float = 0.1,
162
+ activation_fn: str = "relu",
163
+ layer_norm_first: bool = False,
164
+ deep_norm: bool = False,
165
+ has_relative_attention_bias: bool = False,
166
+ num_buckets: int = 0,
167
+ max_distance: int = 0,
168
+ rescale_init: bool = False,
169
+ gru_rel_pos: bool = False,
170
+ encoder_layers: int = 0,
171
+ ) -> None:
172
+
173
+ super().__init__()
174
+ self.embedding_dim = embedding_dim
175
+ self.dropout = dropout
176
+ self.activation_dropout = activation_dropout
177
+
178
+ self.activation_name = activation_fn
179
+ self.activation_fn = get_activation_fn(activation_fn)
180
+ self.self_attn = MultiheadAttention(
181
+ self.embedding_dim,
182
+ num_attention_heads,
183
+ dropout=attention_dropout,
184
+ self_attention=True,
185
+ has_relative_attention_bias=has_relative_attention_bias,
186
+ num_buckets=num_buckets,
187
+ max_distance=max_distance,
188
+ rescale_init=rescale_init,
189
+ gru_rel_pos=gru_rel_pos,
190
+ )
191
+
192
+ self.dropout1 = nn.Dropout(dropout)
193
+ self.dropout2 = nn.Dropout(self.activation_dropout)
194
+ self.dropout3 = nn.Dropout(dropout)
195
+
196
+ self.layer_norm_first = layer_norm_first
197
+
198
+ self.self_attn_layer_norm = LayerNorm(self.embedding_dim)
199
+
200
+ if self.activation_name == "glu":
201
+ self.fc1 = GLU_Linear(self.embedding_dim, ffn_embedding_dim, "swish")
202
+ else:
203
+ self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)
204
+ self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)
205
+
206
+ self.final_layer_norm = LayerNorm(self.embedding_dim)
207
+
208
+ self.deep_norm = deep_norm
209
+ if self.deep_norm:
210
+ self.deep_norm_alpha = math.pow(2 * encoder_layers, 1 / 4)
211
+ else:
212
+ self.deep_norm_alpha = 1
213
+
214
+ def forward(
215
+ self,
216
+ x: torch.Tensor,
217
+ self_attn_mask: torch.Tensor = None,
218
+ self_attn_padding_mask: torch.Tensor = None,
219
+ need_weights: bool = False,
220
+ pos_bias=None
221
+ ):
222
+ residual = x
223
+
224
+ if self.layer_norm_first:
225
+ x = self.self_attn_layer_norm(x)
226
+ x, attn, pos_bias = self.self_attn(
227
+ query=x,
228
+ key=x,
229
+ value=x,
230
+ key_padding_mask=self_attn_padding_mask,
231
+ need_weights=False,
232
+ attn_mask=self_attn_mask,
233
+ position_bias=pos_bias
234
+ )
235
+ x = self.dropout1(x)
236
+ x = residual + x
237
+
238
+ residual = x
239
+ x = self.final_layer_norm(x)
240
+ if self.activation_name == "glu":
241
+ x = self.fc1(x)
242
+ else:
243
+ x = self.activation_fn(self.fc1(x))
244
+ x = self.dropout2(x)
245
+ x = self.fc2(x)
246
+ x = self.dropout3(x)
247
+ x = residual + x
248
+ else:
249
+ x, attn, pos_bias = self.self_attn(
250
+ query=x,
251
+ key=x,
252
+ value=x,
253
+ key_padding_mask=self_attn_padding_mask,
254
+ need_weights=need_weights,
255
+ attn_mask=self_attn_mask,
256
+ position_bias=pos_bias
257
+ )
258
+
259
+ x = self.dropout1(x)
260
+ x = residual * self.deep_norm_alpha + x
261
+
262
+ x = self.self_attn_layer_norm(x)
263
+
264
+ residual = x
265
+ if self.activation_name == "glu":
266
+ x = self.fc1(x)
267
+ else:
268
+ x = self.activation_fn(self.fc1(x))
269
+ x = self.dropout2(x)
270
+ x = self.fc2(x)
271
+ x = self.dropout3(x)
272
+ x = residual * self.deep_norm_alpha + x
273
+ x = self.final_layer_norm(x)
274
+
275
+ return x, attn, pos_bias
276
+
277
+
278
+ class MultiheadAttention(nn.Module):
279
+ """Multi-headed attention.
280
+
281
+ See "Attention Is All You Need" for more details.
282
+ """
283
+
284
+ def __init__(
285
+ self,
286
+ embed_dim,
287
+ num_heads,
288
+ kdim=None,
289
+ vdim=None,
290
+ dropout=0.0,
291
+ bias=True,
292
+ add_bias_kv=False,
293
+ add_zero_attn=False,
294
+ self_attention=False,
295
+ encoder_decoder_attention=False,
296
+ q_noise=0.0,
297
+ qn_block_size=8,
298
+ has_relative_attention_bias=False,
299
+ num_buckets=32,
300
+ max_distance=128,
301
+ gru_rel_pos=False,
302
+ rescale_init=False,
303
+ ):
304
+ super().__init__()
305
+ self.embed_dim = embed_dim
306
+ self.kdim = kdim if kdim is not None else embed_dim
307
+ self.vdim = vdim if vdim is not None else embed_dim
308
+ self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
309
+
310
+ self.num_heads = num_heads
311
+ self.dropout_module = nn.Dropout(dropout)
312
+
313
+ self.has_relative_attention_bias = has_relative_attention_bias
314
+ self.num_buckets = num_buckets
315
+ self.max_distance = max_distance
316
+ if self.has_relative_attention_bias:
317
+ self.relative_attention_bias = nn.Embedding(num_buckets, num_heads)
318
+
319
+ self.head_dim = embed_dim // num_heads
320
+ self.q_head_dim = self.head_dim
321
+ self.k_head_dim = self.head_dim
322
+ assert (
323
+ self.head_dim * num_heads == self.embed_dim
324
+ ), "embed_dim must be divisible by num_heads"
325
+ self.scaling = self.head_dim ** -0.5
326
+
327
+ self.self_attention = self_attention
328
+ self.encoder_decoder_attention = encoder_decoder_attention
329
+
330
+ assert not self.self_attention or self.qkv_same_dim, (
331
+ "Self-attention requires query, key and " "value to be of the same size"
332
+ )
333
+
334
+ k_bias = True
335
+ if rescale_init:
336
+ k_bias = False
337
+
338
+ k_embed_dim = embed_dim
339
+ q_embed_dim = embed_dim
340
+
341
+ self.k_proj = quant_noise(
342
+ nn.Linear(self.kdim, k_embed_dim, bias=k_bias), q_noise, qn_block_size
343
+ )
344
+ self.v_proj = quant_noise(
345
+ nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size
346
+ )
347
+ self.q_proj = quant_noise(
348
+ nn.Linear(embed_dim, q_embed_dim, bias=bias), q_noise, qn_block_size
349
+ )
350
+
351
+ self.out_proj = quant_noise(
352
+ nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size
353
+ )
354
+
355
+ if add_bias_kv:
356
+ self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
357
+ self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
358
+ else:
359
+ self.bias_k = self.bias_v = None
360
+
361
+ self.add_zero_attn = add_zero_attn
362
+
363
+ self.gru_rel_pos = gru_rel_pos
364
+ if self.gru_rel_pos:
365
+ self.grep_linear = nn.Linear(self.q_head_dim, 8)
366
+ self.grep_a = nn.Parameter(torch.ones(1, num_heads, 1, 1))
367
+
368
+ self.reset_parameters()
369
+
370
+ def reset_parameters(self):
371
+ if self.qkv_same_dim:
372
+ # Empirically observed the convergence to be much better with
373
+ # the scaled initialization
374
+ nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
375
+ nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
376
+ nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
377
+ else:
378
+ nn.init.xavier_uniform_(self.k_proj.weight)
379
+ nn.init.xavier_uniform_(self.v_proj.weight)
380
+ nn.init.xavier_uniform_(self.q_proj.weight)
381
+
382
+ nn.init.xavier_uniform_(self.out_proj.weight)
383
+ if self.out_proj.bias is not None:
384
+ nn.init.constant_(self.out_proj.bias, 0.0)
385
+ if self.bias_k is not None:
386
+ nn.init.xavier_normal_(self.bias_k)
387
+ if self.bias_v is not None:
388
+ nn.init.xavier_normal_(self.bias_v)
389
+ if self.has_relative_attention_bias:
390
+ nn.init.xavier_normal_(self.relative_attention_bias.weight)
391
+
392
+ def _relative_positions_bucket(self, relative_positions, bidirectional=True):
393
+ num_buckets = self.num_buckets
394
+ max_distance = self.max_distance
395
+ relative_buckets = 0
396
+
397
+ if bidirectional:
398
+ num_buckets = num_buckets // 2
399
+ relative_buckets += (relative_positions > 0).to(torch.long) * num_buckets
400
+ relative_positions = torch.abs(relative_positions)
401
+ else:
402
+ relative_positions = -torch.min(relative_positions, torch.zeros_like(relative_positions))
403
+
404
+ max_exact = num_buckets // 2
405
+ is_small = relative_positions < max_exact
406
+
407
+ relative_position_if_large = max_exact + (
408
+ torch.log(relative_positions.float() / max_exact)
409
+ / math.log(max_distance / max_exact)
410
+ * (num_buckets - max_exact)
411
+ ).to(torch.long)
412
+ relative_position_if_large = torch.min(
413
+ relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
414
+ )
415
+
416
+ relative_buckets += torch.where(is_small, relative_positions, relative_position_if_large)
417
+ return relative_buckets
418
+
419
+ def compute_bias(self, query_length, key_length):
420
+ context_position = torch.arange(query_length, dtype=torch.long)[:, None]
421
+ memory_position = torch.arange(key_length, dtype=torch.long)[None, :]
422
+ relative_position = memory_position - context_position
423
+ relative_position_bucket = self._relative_positions_bucket(
424
+ relative_position,
425
+ bidirectional=True
426
+ )
427
+ relative_position_bucket = relative_position_bucket.to(self.relative_attention_bias.weight.device)
428
+ values = self.relative_attention_bias(relative_position_bucket)
429
+ values = values.permute([2, 0, 1])
430
+ return values
431
+
432
+ def forward(
433
+ self,
434
+ query,
435
+ key: Optional[Tensor],
436
+ value: Optional[Tensor],
437
+ key_padding_mask: Optional[Tensor] = None,
438
+ incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
439
+ need_weights: bool = True,
440
+ static_kv: bool = False,
441
+ attn_mask: Optional[Tensor] = None,
442
+ before_softmax: bool = False,
443
+ need_head_weights: bool = False,
444
+ position_bias: Optional[Tensor] = None
445
+ ) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor]]:
446
+ """Input shape: Time x Batch x Channel
447
+
448
+ Args:
449
+ key_padding_mask (ByteTensor, optional): mask to exclude
450
+ keys that are pads, of shape `(batch, src_len)`, where
451
+ padding elements are indicated by 1s.
452
+ need_weights (bool, optional): return the attention weights,
453
+ averaged over heads (default: False).
454
+ attn_mask (ByteTensor, optional): typically used to
455
+ implement causal attention, where the mask prevents the
456
+ attention from looking forward in time (default: None).
457
+ before_softmax (bool, optional): return the raw attention
458
+ weights and values before the attention softmax.
459
+ need_head_weights (bool, optional): return the attention
460
+ weights for each head. Implies *need_weights*. Default:
461
+ return the average attention weights over all heads.
462
+ """
463
+ if need_head_weights:
464
+ need_weights = True
465
+
466
+ is_tpu = query.device.type == "xla"
467
+
468
+ tgt_len, bsz, embed_dim = query.size()
469
+ src_len = tgt_len
470
+ assert embed_dim == self.embed_dim
471
+ assert list(query.size()) == [tgt_len, bsz, embed_dim]
472
+ if key is not None:
473
+ src_len, key_bsz, _ = key.size()
474
+ if not torch.jit.is_scripting():
475
+ assert key_bsz == bsz
476
+ assert value is not None
477
+ assert (src_len, bsz) == value.shape[:2]
478
+
479
+ if self.has_relative_attention_bias and position_bias is None:
480
+ position_bias = self.compute_bias(tgt_len, src_len)
481
+ position_bias = position_bias.unsqueeze(0).repeat(bsz, 1, 1, 1).view(bsz * self.num_heads, tgt_len, src_len)
482
+
483
+ if incremental_state is not None:
484
+ saved_state = self._get_input_buffer(incremental_state)
485
+ if saved_state is not None and "prev_key" in saved_state:
486
+ # previous time steps are cached - no need to recompute
487
+ # key and value if they are static
488
+ if static_kv:
489
+ assert self.encoder_decoder_attention and not self.self_attention
490
+ key = value = None
491
+ else:
492
+ saved_state = None
493
+
494
+ if self.self_attention:
495
+ q = self.q_proj(query)
496
+ k = self.k_proj(query)
497
+ v = self.v_proj(query)
498
+ elif self.encoder_decoder_attention:
499
+ # encoder-decoder attention
500
+ q = self.q_proj(query)
501
+ if key is None:
502
+ assert value is None
503
+ k = v = None
504
+ else:
505
+ k = self.k_proj(key)
506
+ v = self.v_proj(key)
507
+
508
+ else:
509
+ assert key is not None and value is not None
510
+ q = self.q_proj(query)
511
+ k = self.k_proj(key)
512
+ v = self.v_proj(value)
513
+ q *= self.scaling
514
+ alpha = 32
515
+ q *= 1 / alpha
516
+
517
+ if self.bias_k is not None:
518
+ assert self.bias_v is not None
519
+ k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
520
+ v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
521
+ if attn_mask is not None:
522
+ attn_mask = torch.cat(
523
+ [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
524
+ )
525
+ if key_padding_mask is not None:
526
+ key_padding_mask = torch.cat(
527
+ [
528
+ key_padding_mask,
529
+ key_padding_mask.new_zeros(key_padding_mask.size(0), 1),
530
+ ],
531
+ dim=1,
532
+ )
533
+
534
+ q = (
535
+ q.contiguous()
536
+ .view(tgt_len, bsz * self.num_heads, self.q_head_dim)
537
+ .transpose(0, 1)
538
+ )
539
+ if k is not None:
540
+ k = (
541
+ k.contiguous()
542
+ .view(-1, bsz * self.num_heads, self.k_head_dim)
543
+ .transpose(0, 1)
544
+ )
545
+ if v is not None:
546
+ v = (
547
+ v.contiguous()
548
+ .view(-1, bsz * self.num_heads, self.head_dim)
549
+ .transpose(0, 1)
550
+ )
551
+
552
+ if saved_state is not None:
553
+ # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
554
+ if "prev_key" in saved_state:
555
+ _prev_key = saved_state["prev_key"]
556
+ assert _prev_key is not None
557
+ prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
558
+ if static_kv:
559
+ k = prev_key
560
+ else:
561
+ assert k is not None
562
+ k = torch.cat([prev_key, k], dim=1)
563
+ src_len = k.size(1)
564
+ if "prev_value" in saved_state:
565
+ _prev_value = saved_state["prev_value"]
566
+ assert _prev_value is not None
567
+ prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
568
+ if static_kv:
569
+ v = prev_value
570
+ else:
571
+ assert v is not None
572
+ v = torch.cat([prev_value, v], dim=1)
573
+ prev_key_padding_mask: Optional[Tensor] = None
574
+ if "prev_key_padding_mask" in saved_state:
575
+ prev_key_padding_mask = saved_state["prev_key_padding_mask"]
576
+ assert k is not None and v is not None
577
+ key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(
578
+ key_padding_mask=key_padding_mask,
579
+ prev_key_padding_mask=prev_key_padding_mask,
580
+ batch_size=bsz,
581
+ src_len=k.size(1),
582
+ static_kv=static_kv,
583
+ )
584
+
585
+ saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim)
586
+ saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim)
587
+ saved_state["prev_key_padding_mask"] = key_padding_mask
588
+ # In this branch incremental_state is never None
589
+ assert incremental_state is not None
590
+ incremental_state = self._set_input_buffer(incremental_state, saved_state)
591
+ assert k is not None
592
+ assert k.size(1) == src_len
593
+
594
+ # This is part of a workaround to get around fork/join parallelism
595
+ # not supporting Optional types.
596
+ if key_padding_mask is not None and key_padding_mask.dim() == 0:
597
+ key_padding_mask = None
598
+
599
+ if key_padding_mask is not None:
600
+ assert key_padding_mask.size(0) == bsz
601
+ assert key_padding_mask.size(1) == src_len
602
+
603
+ if self.add_zero_attn:
604
+ assert v is not None
605
+ src_len += 1
606
+ k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
607
+ v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
608
+ if attn_mask is not None:
609
+ attn_mask = torch.cat(
610
+ [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
611
+ )
612
+ if key_padding_mask is not None:
613
+ key_padding_mask = torch.cat(
614
+ [
615
+ key_padding_mask,
616
+ torch.zeros(key_padding_mask.size(0), 1).type_as(
617
+ key_padding_mask
618
+ ),
619
+ ],
620
+ dim=1,
621
+ )
622
+
623
+ attn_weights = torch.bmm(q, k.transpose(1, 2))
624
+ attn_weights = (attn_weights - attn_weights.max(dim=-1, keepdim=True)[0]) * alpha
625
+ attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
626
+
627
+ assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
628
+
629
+ if attn_mask is not None:
630
+ attn_mask = attn_mask.unsqueeze(0)
631
+ attn_weights += attn_mask
632
+
633
+ if key_padding_mask is not None:
634
+ # don't attend to padding symbols
635
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
636
+ if not is_tpu:
637
+ attn_weights = attn_weights.masked_fill(
638
+ key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
639
+ float("-inf"),
640
+ )
641
+ else:
642
+ attn_weights = attn_weights.transpose(0, 2)
643
+ attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf"))
644
+ attn_weights = attn_weights.transpose(0, 2)
645
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
646
+
647
+ if before_softmax:
648
+ return attn_weights, v, position_bias
649
+
650
+ if position_bias is not None:
651
+ attn_mask_rel_pos = position_bias
652
+ if self.gru_rel_pos == 1:
653
+ query_layer = q.view(bsz, self.num_heads, tgt_len, self.q_head_dim) * alpha / self.scaling
654
+ _B, _H, _L, __ = query_layer.size()
655
+ gate_a, gate_b = torch.sigmoid(self.grep_linear(query_layer).view(
656
+ _B, _H, _L, 2, 4).sum(-1, keepdim=False)).chunk(2, dim=-1)
657
+ gate_a_1 = gate_a * (gate_b * self.grep_a - 1.0) + 2.0
658
+ attn_mask_rel_pos = gate_a_1.view(bsz * self.num_heads, tgt_len, 1) * position_bias
659
+
660
+ attn_mask_rel_pos = attn_mask_rel_pos.view(attn_weights.size())
661
+
662
+ attn_weights = attn_weights + attn_mask_rel_pos
663
+
664
+ attn_weights_float = F.softmax(
665
+ attn_weights, dim=-1
666
+ )
667
+ attn_weights = attn_weights_float.type_as(attn_weights)
668
+ attn_probs = self.dropout_module(attn_weights)
669
+
670
+ assert v is not None
671
+ attn = torch.bmm(attn_probs, v)
672
+ assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
673
+ attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
674
+ attn = self.out_proj(attn)
675
+ attn_weights: Optional[Tensor] = None
676
+ if need_weights:
677
+ attn_weights = attn_weights_float.view(
678
+ bsz, self.num_heads, tgt_len, src_len
679
+ ).transpose(1, 0)
680
+ if not need_head_weights:
681
+ # average attention weights over heads
682
+ attn_weights = attn_weights.mean(dim=0)
683
+
684
+ return attn, attn_weights, position_bias
685
+
686
+ @staticmethod
687
+ def _append_prev_key_padding_mask(
688
+ key_padding_mask: Optional[Tensor],
689
+ prev_key_padding_mask: Optional[Tensor],
690
+ batch_size: int,
691
+ src_len: int,
692
+ static_kv: bool,
693
+ ) -> Optional[Tensor]:
694
+ # saved key padding masks have shape (bsz, seq_len)
695
+ if prev_key_padding_mask is not None and static_kv:
696
+ new_key_padding_mask = prev_key_padding_mask
697
+ elif prev_key_padding_mask is not None and key_padding_mask is not None:
698
+ new_key_padding_mask = torch.cat(
699
+ [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
700
+ )
701
+ # During incremental decoding, as the padding token enters and
702
+ # leaves the frame, there will be a time when prev or current
703
+ # is None
704
+ elif prev_key_padding_mask is not None:
705
+ if src_len > prev_key_padding_mask.size(1):
706
+ filler = torch.zeros(
707
+ (batch_size, src_len - prev_key_padding_mask.size(1)),
708
+ device=prev_key_padding_mask.device,
709
+ )
710
+ new_key_padding_mask = torch.cat(
711
+ [prev_key_padding_mask.float(), filler.float()], dim=1
712
+ )
713
+ else:
714
+ new_key_padding_mask = prev_key_padding_mask.float()
715
+ elif key_padding_mask is not None:
716
+ if src_len > key_padding_mask.size(1):
717
+ filler = torch.zeros(
718
+ (batch_size, src_len - key_padding_mask.size(1)),
719
+ device=key_padding_mask.device,
720
+ )
721
+ new_key_padding_mask = torch.cat(
722
+ [filler.float(), key_padding_mask.float()], dim=1
723
+ )
724
+ else:
725
+ new_key_padding_mask = key_padding_mask.float()
726
+ else:
727
+ new_key_padding_mask = prev_key_padding_mask
728
+ return new_key_padding_mask
729
+
730
+ def _get_input_buffer(
731
+ self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
732
+ ) -> Dict[str, Optional[Tensor]]:
733
+ result = self.get_incremental_state(incremental_state, "attn_state")
734
+ if result is not None:
735
+ return result
736
+ else:
737
+ empty_result: Dict[str, Optional[Tensor]] = {}
738
+ return empty_result
739
+
740
+ def _set_input_buffer(
741
+ self,
742
+ incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
743
+ buffer: Dict[str, Optional[Tensor]],
744
+ ):
745
+ return self.set_incremental_state(incremental_state, "attn_state", buffer)
746
+
747
+ def apply_sparse_mask(self, attn_weights, tgt_len: int, src_len: int, bsz: int):
748
+ return attn_weights
749
+
750
+
751
+ def init_bert_params(module):
752
+ """
753
+ Initialize the weights specific to the BERT Model.
754
+ This overrides the default initializations depending on the specified arguments.
755
+ 1. If normal_init_linear_weights is set then weights of linear
756
+ layer will be initialized using the normal distribution and
757
+ bais will be set to the specified value.
758
+ 2. If normal_init_embed_weights is set then weights of embedding
759
+ layer will be initialized using the normal distribution.
760
+ 3. If normal_init_proj_weights is set then weights of
761
+ in_project_weight for MultiHeadAttention initialized using
762
+ the normal distribution (to be validated).
763
+ """
764
+
765
+ def normal_(data):
766
+ # with FSDP, module params will be on CUDA, so we cast them back to CPU
767
+ # so that the RNG is consistent with and without FSDP
768
+ data.copy_(
769
+ data.cpu().normal_(mean=0.0, std=0.02).to(data.device)
770
+ )
771
+
772
+ if isinstance(module, nn.Linear):
773
+ normal_(module.weight.data)
774
+ if module.bias is not None:
775
+ module.bias.data.zero_()
776
+ if isinstance(module, nn.Embedding):
777
+ normal_(module.weight.data)
778
+ if module.padding_idx is not None:
779
+ module.weight.data[module.padding_idx].zero_()
780
+ if isinstance(module, MultiheadAttention):
781
+ normal_(module.q_proj.weight.data)
782
+ normal_(module.k_proj.weight.data)
783
+ normal_(module.v_proj.weight.data)
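
For reference, a minimal sketch of how the T5-style relative position bias defined in MultiheadAttention above can be exercised on its own; the import path and sizes are assumptions for illustration, not part of the uploaded file:

import torch
from cocap.modules.beats.backbone import MultiheadAttention  # assumed import path

# Instantiate with the defaults used in this file (num_buckets=32, max_distance=128).
attn = MultiheadAttention(embed_dim=768, num_heads=12, self_attention=True,
                          has_relative_attention_bias=True)
bias = attn.compute_bias(query_length=4, key_length=4)
print(bias.shape)  # torch.Size([12, 4, 4]); one bias map per attention head

Close relative offsets get their own buckets, while larger offsets are binned logarithmically up to max_distance, so the learned nn.Embedding(num_buckets, num_heads) generalizes to longer sequences; note that only layer 0 owns the embedding and the remaining layers share it.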
CoCap/cocap/modules/beats/modules.py ADDED
@@ -0,0 +1,219 @@
1
+ # --------------------------------------------------------
2
+ # BEATs: Audio Pre-Training with Acoustic Tokenizers (https://arxiv.org/abs/2212.09058)
3
+ # Github source: https://github.com/microsoft/unilm/tree/master/beats
4
+ # Copyright (c) 2022 Microsoft
5
+ # Licensed under The MIT License [see LICENSE for details]
6
+ # Based on fairseq code bases
7
+ # https://github.com/pytorch/fairseq
8
+ # --------------------------------------------------------
9
+
10
+ import math
11
+ import warnings
12
+ import torch
13
+ from torch import Tensor, nn
14
+ import torch.nn.functional as F
15
+
16
+
17
+ class GradMultiply(torch.autograd.Function):
18
+ @staticmethod
19
+ def forward(ctx, x, scale):
20
+ ctx.scale = scale
21
+ res = x.new(x)
22
+ return res
23
+
24
+ @staticmethod
25
+ def backward(ctx, grad):
26
+ return grad * ctx.scale, None
27
+
28
+
29
+ class SamePad(nn.Module):
30
+ def __init__(self, kernel_size, causal=False):
31
+ super().__init__()
32
+ if causal:
33
+ self.remove = kernel_size - 1
34
+ else:
35
+ self.remove = 1 if kernel_size % 2 == 0 else 0
36
+
37
+ def forward(self, x):
38
+ if self.remove > 0:
39
+ x = x[:, :, : -self.remove]
40
+ return x
41
+
42
+
43
+ class Swish(nn.Module):
44
+ def __init__(self):
45
+ super(Swish, self).__init__()
46
+ self.act = torch.nn.Sigmoid()
47
+
48
+ def forward(self, x):
49
+ return x * self.act(x)
50
+
51
+
52
+ class GLU_Linear(nn.Module):
53
+ def __init__(self, input_dim, output_dim, glu_type="sigmoid", bias_in_glu=True):
54
+ super(GLU_Linear, self).__init__()
55
+
56
+ self.glu_type = glu_type
57
+ self.output_dim = output_dim
58
+
59
+ if glu_type == "sigmoid":
60
+ self.glu_act = torch.nn.Sigmoid()
61
+ elif glu_type == "swish":
62
+ self.glu_act = Swish()
63
+ elif glu_type == "relu":
64
+ self.glu_act = torch.nn.ReLU()
65
+ elif glu_type == "gelu":
66
+ self.glu_act = torch.nn.GELU()
67
+
68
+ if bias_in_glu:
69
+ self.linear = nn.Linear(input_dim, output_dim * 2, True)
70
+ else:
71
+ self.linear = nn.Linear(input_dim, output_dim * 2, False)
72
+
73
+ def forward(self, x):
74
+ # to be consistent with GLU_Linear, we assume the input always has the #channel (#dim) in the last dimension of the tensor, so need to switch the dimension first for 1D-Conv case
75
+ x = self.linear(x)
76
+
77
+ if self.glu_type == "bilinear":
78
+ x = (x[:, :, 0:self.output_dim] * x[:, :, self.output_dim:self.output_dim * 2])
79
+ else:
80
+ x = (x[:, :, 0:self.output_dim] * self.glu_act(x[:, :, self.output_dim:self.output_dim * 2]))
81
+
82
+ return x
83
+
84
+
85
+ def gelu_accurate(x):
86
+ if not hasattr(gelu_accurate, "_a"):
87
+ gelu_accurate._a = math.sqrt(2 / math.pi)
88
+ return (
89
+ 0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3))))
90
+ )
91
+
92
+
93
+ def gelu(x: torch.Tensor) -> torch.Tensor:
94
+ return torch.nn.functional.gelu(x.float()).type_as(x)
95
+
96
+
97
+ def get_activation_fn(activation: str):
98
+ """Returns the activation function corresponding to `activation`"""
99
+
100
+ if activation == "relu":
101
+ return F.relu
102
+ elif activation == "gelu":
103
+ return gelu
104
+ elif activation == "gelu_fast":
105
+ warnings.warn(
106
+ "--activation-fn=gelu_fast has been renamed to gelu_accurate"
107
+ )
108
+ return gelu_accurate
109
+ elif activation == "gelu_accurate":
110
+ return gelu_accurate
111
+ elif activation == "tanh":
112
+ return torch.tanh
113
+ elif activation == "linear":
114
+ return lambda x: x
115
+ elif activation == "glu":
116
+ return lambda x: x
117
+ else:
118
+ raise RuntimeError("--activation-fn {} not supported".format(activation))
119
+
120
+
121
+ def quant_noise(module, p, block_size):
122
+ """
123
+ Wraps modules and applies quantization noise to the weights for
124
+ subsequent quantization with Iterative Product Quantization as
125
+ described in "Training with Quantization Noise for Extreme Model Compression"
126
+
127
+ Args:
128
+ - module: nn.Module
129
+ - p: amount of Quantization Noise
130
+ - block_size: size of the blocks for subsequent quantization with iPQ
131
+
132
+ Remarks:
133
+ - Module weights must have the right sizes wrt the block size
134
+ - Only Linear, Embedding and Conv2d modules are supported for the moment
135
+ - For more detail on how to quantize by blocks with convolutional weights,
136
+ see "And the Bit Goes Down: Revisiting the Quantization of Neural Networks"
137
+ - We implement the simplest form of noise here as stated in the paper
138
+ which consists in randomly dropping blocks
139
+ """
140
+
141
+ # if no quantization noise, don't register hook
142
+ if p <= 0:
143
+ return module
144
+
145
+ # supported modules
146
+ assert isinstance(module, (nn.Linear, nn.Embedding, nn.Conv2d))
147
+
148
+ # test whether module.weight has the right sizes wrt block_size
149
+ is_conv = module.weight.ndim == 4
150
+
151
+ # 2D matrix
152
+ if not is_conv:
153
+ assert (
154
+ module.weight.size(1) % block_size == 0
155
+ ), "Input features must be a multiple of block sizes"
156
+
157
+ # 4D matrix
158
+ else:
159
+ # 1x1 convolutions
160
+ if module.kernel_size == (1, 1):
161
+ assert (
162
+ module.in_channels % block_size == 0
163
+ ), "Input channels must be a multiple of block sizes"
164
+ # regular convolutions
165
+ else:
166
+ k = module.kernel_size[0] * module.kernel_size[1]
167
+ assert k % block_size == 0, "Kernel size must be a multiple of block size"
168
+
169
+ def _forward_pre_hook(mod, input):
170
+ # no noise for evaluation
171
+ if mod.training:
172
+ if not is_conv:
173
+ # gather weight and sizes
174
+ weight = mod.weight
175
+ in_features = weight.size(1)
176
+ out_features = weight.size(0)
177
+
178
+ # split weight matrix into blocks and randomly drop selected blocks
179
+ mask = torch.zeros(
180
+ in_features // block_size * out_features, device=weight.device
181
+ )
182
+ mask.bernoulli_(p)
183
+ mask = mask.repeat_interleave(block_size, -1).view(-1, in_features)
184
+
185
+ else:
186
+ # gather weight and sizes
187
+ weight = mod.weight
188
+ in_channels = mod.in_channels
189
+ out_channels = mod.out_channels
190
+
191
+ # split weight matrix into blocks and randomly drop selected blocks
192
+ if mod.kernel_size == (1, 1):
193
+ mask = torch.zeros(
194
+ int(in_channels // block_size * out_channels),
195
+ device=weight.device,
196
+ )
197
+ mask.bernoulli_(p)
198
+ mask = mask.repeat_interleave(block_size, -1).view(-1, in_channels)
199
+ else:
200
+ mask = torch.zeros(
201
+ weight.size(0), weight.size(1), device=weight.device
202
+ )
203
+ mask.bernoulli_(p)
204
+ mask = (
205
+ mask.unsqueeze(2)
206
+ .unsqueeze(3)
207
+ .repeat(1, 1, mod.kernel_size[0], mod.kernel_size[1])
208
+ )
209
+
210
+ # scale weights and apply mask
211
+ mask = mask.to(
212
+ torch.bool
213
+ ) # x.bool() is not currently supported in TorchScript
214
+ s = 1 / (1 - p)
215
+ mod.weight.data = s * weight.masked_fill(mask, 0)
216
+
217
+ module.register_forward_pre_hook(_forward_pre_hook)
218
+ return module
219
+
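
As a quick orientation for the helpers above, a hedged usage sketch of quant_noise (the module path is an assumption for illustration, not part of the uploaded file):

import torch
from torch import nn
from cocap.modules.beats.modules import quant_noise  # assumed import path

# Wrap a linear layer; in_features must be divisible by block_size.
layer = quant_noise(nn.Linear(256, 512), p=0.1, block_size=8)
layer.train()
_ = layer(torch.randn(4, 256))  # forward pre-hook zeroes random weight blocks and rescales by 1/(1-p)
layer.eval()
_ = layer(torch.randn(4, 256))  # no noise is applied in evaluation mode

The pre-hook rewrites mod.weight.data on every training forward pass, which is the simple block-dropping form of quantization noise described in the referenced paper.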
CoCap/cocap/modules/bert.py ADDED
@@ -0,0 +1,403 @@
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 2023/2/18 13:57
3
+ # @Author : Yaojie Shen
4
+ # @Project : CoCap
5
+ # @File : bert.py
6
+
7
+ import math
8
+ import torch
9
+ import torch.nn as nn
10
+
11
+
12
+ def make_shifted_mask(input_mask, max_v_len, max_t_len, memory_len=0):
13
+ """
14
+ Args:
15
+ input_mask: (N, L) with `1` indicates valid bits, `0` indicates pad
16
+ max_v_len: int, the first `max_v_len` is for video and its padding, the length
17
+ of the rest of the bits is `max_t_len`. We have L = `max_v_len` + `max_t_len`.
18
+ Note max_v_len may also include the memory len (M), thus max_v_len += M
19
+ max_t_len: int
20
+ memory_len: int, M
21
+ Returns:
22
+
23
+ >>> max_v_len = 2; max_t_len = 3; input_mask = torch.ones(2, 5)
24
+ >>> make_pad_shifted_mask(input_mask, max_v_len, max_t_len)[0]
25
+ tensor([[1., 1., 0., 0., 0.],
26
+ [1., 1., 0., 0., 0.],
27
+ [1., 1., 1., 0., 0.],
28
+ [1., 1., 1., 1., 0.],
29
+ [1., 1., 1., 1., 1.]])
30
+ """
31
+ bsz, seq_len = input_mask.shape
32
+ assert max_v_len + max_t_len + memory_len == seq_len, f"{max_v_len} {max_t_len} {memory_len} {seq_len}"
33
+ shifted_mask = input_mask.new_zeros(bsz, max_v_len + max_t_len, seq_len) # (N, L, M+L)
34
+ shifted_mask[:, :, :memory_len + max_v_len] = 1
35
+ shifted_mask[:, max_v_len:, memory_len + max_v_len:] = torch.tril(input_mask.new_ones(max_t_len, max_t_len),
36
+ diagonal=0)
37
+ return shifted_mask
38
+
39
+
40
+ def make_pad_shifted_mask(input_mask, max_v_len, max_t_len, memory_len=0):
41
+ """input_mask: (N, L), """
42
+ shifted_mask = make_shifted_mask(input_mask, max_v_len, max_t_len, memory_len=memory_len)
43
+ kg_masks = shifted_mask * input_mask.unsqueeze(1)
44
+ return kg_masks
45
+
46
+
47
+ class BertSelfAttention(nn.Module):
48
+ def __init__(self, config):
49
+ super(BertSelfAttention, self).__init__()
50
+ if config.hidden_size % config.num_attention_heads != 0:
51
+ raise ValueError(
52
+ "The hidden size (%d) is not a multiple of the number of attention "
53
+ "heads (%d)" % (config.hidden_size, config.num_attention_heads))
54
+ self.num_attention_heads = config.num_attention_heads
55
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
56
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
57
+
58
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
59
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
60
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
61
+
62
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
63
+
64
+ def transpose_for_scores(self, x):
65
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) # (N, L, nh, dh)
66
+ x = x.view(*new_x_shape)
67
+ return x.permute(0, 2, 1, 3) # (N, nh, L, dh)
68
+
69
+ def forward(self, query_states, key_states, value_states, attention_mask):
70
+ """
71
+ Args:
72
+ query_states: (N, Lq, D)
73
+ key_states: (N, L, D)
74
+ value_states: (N, L, D)
75
+ attention_mask: (N, Lq, L)
76
+
77
+ Returns:
78
+
79
+ """
80
+ # only need to mask the dimension where the softmax (last dim) is applied, as another dim (second last)
81
+ # will be ignored in future computation anyway
82
+ mixed_query_layer = self.query(query_states)
83
+ mixed_key_layer = self.key(key_states)
84
+ mixed_value_layer = self.value(value_states)
85
+
86
+ query_layer = self.transpose_for_scores(mixed_query_layer) # (N, nh, Lq, dh)
87
+ key_layer = self.transpose_for_scores(mixed_key_layer) # (N, nh, L, dh)
88
+ value_layer = self.transpose_for_scores(mixed_value_layer) # (N, nh, L, dh)
89
+
90
+ # Take the dot product between "query" and "key" to get the raw attention scores.
91
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) # (N, nh, Lq, L)
92
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
93
+ # Apply the attention mask is (precomputed for all modules in BertModel forward() function)
94
+ if attention_mask is not None:
95
+ attention_mask = (1 - attention_mask.unsqueeze(1)) * -10000. # (N, 1, Lq, L)
96
+ attention_scores = attention_scores + attention_mask
97
+
98
+ # Normalize the attention scores to probabilities.
99
+ attention_probs = nn.Softmax(dim=-1)(attention_scores)
100
+
101
+ # This is actually dropping out entire tokens to attend to, which might
102
+ # seem a bit unusual, but is taken from the original Transformer paper.
103
+ attention_probs = self.dropout(attention_probs)
104
+
105
+ context_layer = torch.matmul(attention_probs, value_layer)
106
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
107
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
108
+ context_layer = context_layer.view(*new_context_layer_shape)
109
+ return context_layer
110
+
111
+
112
+ class BertSelfOutput(nn.Module):
113
+ def __init__(self, config):
114
+ super(BertSelfOutput, self).__init__()
115
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
116
+ self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
117
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
118
+
119
+ def forward(self, hidden_states, input_tensor):
120
+ hidden_states = self.dense(hidden_states)
121
+ hidden_states = self.dropout(hidden_states)
122
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
123
+ return hidden_states
124
+
125
+
126
+ class BertAttention(nn.Module):
127
+ def __init__(self, config):
128
+ super(BertAttention, self).__init__()
129
+ self.self = BertSelfAttention(config)
130
+ self.output = BertSelfOutput(config)
131
+
132
+ def forward(self, input_tensor, attention_mask):
133
+ self_output = self.self(input_tensor, input_tensor, input_tensor, attention_mask)
134
+ attention_output = self.output(self_output, input_tensor)
135
+ return attention_output
136
+
137
+
138
+ class BertAttention_Cross(nn.Module):
139
+ def __init__(self, config):
140
+ super(BertAttention_Cross, self).__init__()
141
+ self.self = BertSelfAttention(config)
142
+ self.output = BertSelfOutput(config)
143
+
144
+ def forward(self, q, kv, attention_mask=None):
145
+ self_output = self.self(q, kv, kv, attention_mask)
146
+ attention_output = self.output(self_output, q)
147
+ return attention_output
148
+
149
+
150
+ def gelu(x):
151
+ """Implementation of the gelu activation function.
152
+ For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
153
+ 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
154
+ Also see https://arxiv.org/abs/1606.08415
155
+ """
156
+ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
157
+
158
+
159
+ class BertIntermediate(nn.Module):
160
+ def __init__(self, config):
161
+ super(BertIntermediate, self).__init__()
162
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
163
+ self.intermediate_act_fn = gelu
164
+
165
+ def forward(self, hidden_states):
166
+ hidden_states = self.dense(hidden_states)
167
+ hidden_states = self.intermediate_act_fn(hidden_states)
168
+ return hidden_states
169
+
170
+
171
+ class BertLayerNorm(nn.Module):
172
+ def __init__(self, hidden_size, eps=1e-12):
173
+ """Construct a layernorm module in the TF style (epsilon inside the square root).
174
+ """
175
+ super(BertLayerNorm, self).__init__()
176
+ self.weight = nn.Parameter(torch.ones(hidden_size))
177
+ self.bias = nn.Parameter(torch.zeros(hidden_size))
178
+ self.variance_epsilon = eps
179
+
180
+ def forward(self, x):
181
+ u = x.mean(-1, keepdim=True)
182
+ s = (x - u).pow(2).mean(-1, keepdim=True)
183
+ x = (x - u) / torch.sqrt(s + self.variance_epsilon)
184
+ return self.weight * x + self.bias
185
+
186
+
187
+ class BertOutput(nn.Module):
188
+ def __init__(self, config):
189
+ super(BertOutput, self).__init__()
190
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
191
+ self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
192
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
193
+
194
+ def forward(self, hidden_states, input_tensor):
195
+ hidden_states = self.dense(hidden_states)
196
+ hidden_states = self.dropout(hidden_states)
197
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
198
+ return hidden_states
199
+
200
+
201
+ class BertLayerNoMemory(nn.Module):
202
+ def __init__(self, config):
203
+ super(BertLayerNoMemory, self).__init__()
204
+ self.config = config
205
+ self.attention = BertAttention(config)
206
+ self.hidden_intermediate = BertIntermediate(config)
207
+ self.memory_intermediate = BertIntermediate(config)
208
+ self.output = BertOutput(config)
209
+
210
+ def forward(self, hidden_states, attention_mask, n_v, n_t):
211
+ """
212
+ Args:
213
+ hidden_states: (N, L, D)
214
+ attention_mask: (N, L)
215
+ Returns:
216
+
217
+ """
218
+ # self-attention, need to shift right
219
+ shifted_self_mask = make_pad_shifted_mask(attention_mask, n_v, n_t) # (N, L, L)
220
+ attention_output = self.attention(hidden_states, shifted_self_mask) # (N, L, D)
221
+ intermediate_output = self.hidden_intermediate(attention_output) # (N, L, D)
222
+ layer_output = self.output(intermediate_output, attention_output) # (N, L, D)
223
+ return layer_output
224
+
225
+
226
+ class BertSelfEncoder(nn.Module):
227
+ def __init__(self, cap_config):
228
+ super(BertSelfEncoder, self).__init__()
229
+ self.word_embeddings = nn.Embedding(cap_config.vocab_size, cap_config.word_vec_size, padding_idx=0)  # word embedding lookup
230
+ self.word_fc = nn.Sequential( # 300->768
231
+ BertLayerNorm(cap_config.word_vec_size, eps=cap_config.layer_norm_eps),
232
+ nn.Dropout(cap_config.hidden_dropout_prob),
233
+ nn.Linear(cap_config.word_vec_size, cap_config.hidden_size),
234
+ nn.ReLU(True),
235
+ BertLayerNorm(cap_config.hidden_size, eps=cap_config.layer_norm_eps),
236
+ )
237
+ self.video_embeddings = nn.Sequential(
238
+ BertLayerNorm(cap_config.video_feature_size, eps=cap_config.layer_norm_eps),
239
+ nn.Dropout(cap_config.hidden_dropout_prob),
240
+ nn.Linear(cap_config.video_feature_size, cap_config.hidden_size),
241
+ nn.ReLU(True),
242
+ BertLayerNorm(cap_config.hidden_size, eps=cap_config.layer_norm_eps),
243
+ )
244
+ self.position_embeddings = PositionEncoding(n_filters=cap_config.hidden_size, max_len=1000)
245
+ self.token_type_embeddings = nn.Embedding(3, cap_config.hidden_size)
246
+ self.LayerNorm = BertLayerNorm(cap_config.hidden_size, eps=cap_config.layer_norm_eps)
247
+ self.dropout = nn.Dropout(cap_config.hidden_dropout_prob)
248
+
249
+ self.config = cap_config
250
+ self.layers = cap_config.num_hidden_layers
251
+ self.layer_sa = nn.ModuleList([BertLayerNoMemory(cap_config) for _ in range(self.layers)])
252
+
253
+ def forward(self, vhidden_states, thidden_states, attention_mask, type_ids, encoded=False):
254
+ if not encoded:
255
+ # thidden_states could be encoded (pre-encoded using another text encoder), if not, add word embedding
256
+ thidden_states = self.word_embeddings(thidden_states)
257
+ thidden_states = self.word_fc(thidden_states)
258
+
259
+ vhidden_states = self.video_embeddings(vhidden_states)
260
+ hidden_states = torch.cat((vhidden_states, thidden_states), dim=1)
261
+
262
+ token_type_embeddings = self.token_type_embeddings(type_ids)
263
+ hidden_states = hidden_states + token_type_embeddings
264
+ hidden_states = self.position_embeddings(hidden_states)
265
+ hidden_states = self.LayerNorm(hidden_states)
266
+ hidden_states = self.dropout(hidden_states)
267
+
268
+ for i in range(self.layers):
269
+ hidden_states = self.layer_sa[i](hidden_states, attention_mask, self.config.max_v_len,
270
+ self.config.max_t_len)
271
+ return hidden_states
272
+
273
+
274
+ class BertLayerNoMemory_Cross(nn.Module):
275
+ def __init__(self, config):
276
+ super(BertLayerNoMemory_Cross, self).__init__()
277
+ self.config = config
278
+ self.attention = BertAttention_Cross(config)
279
+ self.hidden_intermediate = BertIntermediate(config)
280
+ self.output = BertOutput(config)
281
+
282
+ def forward(self, q, kv):
283
+ # shifted_self_mask = make_pad_cross_mask() # (N, L, L)
284
+ attention_output = self.attention(q, kv) # (N, L, D)
285
+ intermediate_output = self.hidden_intermediate(attention_output) # (N, L, D)
286
+ layer_output = self.output(intermediate_output, attention_output) # (N, L, D)
287
+ return layer_output
288
+
289
+
290
+ class BertCrosEncoder(nn.Module):
291
+ def __init__(self, cap_config):
292
+ super(BertCrosEncoder, self).__init__()
293
+ self.word_fc = nn.Sequential( # 300->768
294
+ BertLayerNorm(cap_config.word_vec_size, eps=cap_config.layer_norm_eps),
295
+ nn.Dropout(cap_config.hidden_dropout_prob),
296
+ nn.Linear(cap_config.word_vec_size, cap_config.hidden_size),
297
+ nn.ReLU(True),
298
+ BertLayerNorm(cap_config.hidden_size, eps=cap_config.layer_norm_eps),
299
+ )
300
+ self.video_embeddings = nn.Sequential(
301
+ BertLayerNorm(cap_config.video_feature_size, eps=cap_config.layer_norm_eps),
302
+ nn.Dropout(cap_config.hidden_dropout_prob),
303
+ nn.Linear(cap_config.video_feature_size, cap_config.hidden_size),
304
+ nn.ReLU(True),
305
+ BertLayerNorm(cap_config.hidden_size, eps=cap_config.layer_norm_eps),
306
+ )
307
+ self.layers = cap_config.num_hidden_layers
308
+ self.layer_sa = nn.ModuleList([BertLayerNoMemory(cap_config) for _ in range(self.layers)])
309
+ self.layer_ca = nn.ModuleList([BertLayerNoMemory_Cross(cap_config) for _ in range(self.layers)])
310
+ self.max_t_len = cap_config.max_t_len
311
+
312
+ def forward(self, vhidden_states, thidden_states, tmask, type_ids):
313
+ thidden_states = self.word_fc(thidden_states)
314
+ vhidden_states = self.video_embeddings(vhidden_states)
315
+ for i in range(self.layers):
316
+ # thidden_states = self.layer_sa[i](thidden_states, tmask.int(), 0, self.max_t_len)  # adding this self-attention degrades results
317
+ thidden_states = self.layer_ca[i](thidden_states, vhidden_states)
318
+ return thidden_states
319
+
320
+
321
+ class BertPredictionHeadTransform(nn.Module):
322
+ def __init__(self, config):
323
+ super(BertPredictionHeadTransform, self).__init__()
324
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
325
+ self.transform_act_fn = gelu
326
+ self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
327
+
328
+ def forward(self, hidden_states):
329
+ """(N, L, D)"""
330
+ hidden_states = self.dense(hidden_states)
331
+ hidden_states = self.transform_act_fn(hidden_states)
332
+ hidden_states = self.LayerNorm(hidden_states)
333
+ return hidden_states
334
+
335
+
336
+ class BertLMPredictionHead(nn.Module):
337
+ def __init__(self, config, bert_model_embedding_weights=None):
338
+ super(BertLMPredictionHead, self).__init__()
339
+ self.transform = BertPredictionHeadTransform(config)
340
+
341
+ # The output weights are the same as the input embeddings, but there is
342
+ # an output-only bias for each token.
343
+ if config.share_wd_cls_weight:
344
+ assert bert_model_embedding_weights is not None, \
345
+ "bert_model_embedding_weights should not be None " \
346
+ "when setting --share_wd_cls_weight flag to be true"
347
+ assert config.hidden_size == bert_model_embedding_weights.size(1), \
348
+ "hidden size has be the same as word embedding size when " \
349
+ "sharing word embedding weight and classifier weight"
350
+ self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
351
+ bert_model_embedding_weights.size(0),
352
+ bias=False)
353
+ self.decoder.weight = torch.nn.Parameter(bert_model_embedding_weights.clone())
354
+ else:
355
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
356
+
357
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
358
+
359
+ def forward(self, hidden_states):
360
+ """(N, L, D)"""
361
+ hidden_states = self.transform(hidden_states)
362
+ hidden_states = self.decoder(hidden_states) + self.bias
363
+ return hidden_states # (N, L, vocab_size)
364
+
365
+
366
+ class PositionEncoding(nn.Module):
367
+ """
368
+ Add positional information to input tensor.
369
+ :Examples:
370
+ >>> model = PositionEncoding(n_filters=6, max_len=10)
371
+ >>> test_input1 = torch.zeros(3, 10, 6)
372
+ >>> output1 = model(test_input1)
373
+ >>> output1.size()
374
+ >>> test_input2 = torch.zeros(5, 3, 9, 6)
375
+ >>> output2 = model(test_input2)
376
+ >>> output2.size()
377
+ """
378
+
379
+ def __init__(self, n_filters=128, max_len=500):
380
+ """
381
+ :param n_filters: same with input hidden size
382
+ :param max_len: maximum sequence length
383
+ """
384
+ super(PositionEncoding, self).__init__()
385
+ # Compute the positional encodings once in log space.
386
+ pe = torch.zeros(max_len, n_filters) # (L, D)
387
+ position = torch.arange(0, max_len).float().unsqueeze(1)
388
+ div_term = torch.exp(torch.arange(0, n_filters, 2).float() * - (math.log(10000.0) / n_filters))
389
+ pe[:, 0::2] = torch.sin(position * div_term)
390
+ pe[:, 1::2] = torch.cos(position * div_term)
391
+ self.register_buffer("pe", pe) # buffer is a tensor, not a variable, (L, D)
392
+
393
+ def forward(self, x):
394
+ """
395
+ :Input: (*, L, D)
396
+ :Output: (*, L, D) the same size as input
397
+ """
398
+ pe = self.pe.data[:x.size(-2), :] # (#x.size(-2), n_filters)
399
+ extra_dim = len(x.size()) - 2
400
+ for _ in range(extra_dim):
401
+ pe = pe.unsqueeze(0)
402
+ x = x + pe
403
+ return x
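
To make the masking convention above concrete, here is a small hedged example of make_pad_shifted_mask (the import path is an assumption for illustration), mirroring the doctest in make_shifted_mask: every position may attend to the video prefix, and text positions additionally attend causally to earlier text tokens.

import torch
from cocap.modules.bert import make_pad_shifted_mask  # assumed import path

input_mask = torch.ones(1, 5)  # 2 video positions + 3 text positions, all valid
mask = make_pad_shifted_mask(input_mask, max_v_len=2, max_t_len=3)
print(mask[0])
# tensor([[1., 1., 0., 0., 0.],
#         [1., 1., 0., 0., 0.],
#         [1., 1., 1., 0., 0.],
#         [1., 1., 1., 1., 0.],
#         [1., 1., 1., 1., 1.]])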
CoCap/cocap/modules/clip/__init__.py ADDED
@@ -0,0 +1 @@
1
+ from .clip import *
CoCap/cocap/modules/clip/bpe_simple_vocab_16e6.txt.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a
3
+ size 1356917
CoCap/cocap/modules/clip/clip.py ADDED
@@ -0,0 +1,250 @@
1
+ import hashlib
2
+ import os
3
+ import urllib
4
+ import warnings
5
+ from typing import Optional
6
+ from typing import Union, List
7
+
8
+ import torch
9
+ from PIL import Image
10
+ from pkg_resources import packaging
11
+ from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
12
+ from tqdm import tqdm
13
+
14
+ from .model import build_model
15
+ from .simple_tokenizer import SimpleTokenizer as _Tokenizer
16
+
17
+ try:
18
+ from torchvision.transforms import InterpolationMode
19
+
20
+ BICUBIC = InterpolationMode.BICUBIC
21
+ except ImportError:
22
+ BICUBIC = Image.BICUBIC
23
+
24
+ if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"):
25
+ warnings.warn("PyTorch version 1.7.1 or higher is recommended")
26
+
27
+ __all__ = ["available_models", "load", "tokenize", "get_model_path"]
28
+ _tokenizer = _Tokenizer()
29
+
30
+ _MODELS = {
31
+ "RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
32
+ "RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
33
+ "RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
34
+ "RN50x16": "https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt",
35
+ "RN50x64": "https://openaipublic.azureedge.net/clip/models/be1cfb55d75a9666199fb2206c106743da0f6468c9d327f3e0d0a543a9919d9c/RN50x64.pt",
36
+ "ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
37
+ "ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
38
+ "ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt",
39
+ "ViT-L/14@336px": "https://openaipublic.azureedge.net/clip/models/3035c92b350959924f9f00213499208652fc7ea050643e8b385c2dac08641f02/ViT-L-14-336px.pt",
40
+ }
41
+
42
+
43
+ def _download(url: str, root: str):
44
+ os.makedirs(root, exist_ok=True)
45
+ filename = os.path.basename(url)
46
+
47
+ expected_sha256 = url.split("/")[-2]
48
+ download_target = os.path.join(root, filename)
49
+
50
+ if os.path.exists(download_target) and not os.path.isfile(download_target):
51
+ raise RuntimeError(f"{download_target} exists and is not a regular file")
52
+
53
+ if os.path.isfile(download_target):
54
+ if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
55
+ return download_target
56
+ else:
57
+ warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
58
+
59
+ with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
60
+ with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True,
61
+ unit_divisor=1024) as loop:
62
+ while True:
63
+ buffer = source.read(8192)
64
+ if not buffer:
65
+ break
66
+
67
+ output.write(buffer)
68
+ loop.update(len(buffer))
69
+
70
+ if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
71
+ raise RuntimeError("Model has been downloaded but the SHA256 checksum does not not match")
72
+
73
+ return download_target
74
+
75
+
76
+ def _convert_image_to_rgb(image):
77
+ return image.convert("RGB")
78
+
79
+
80
+ def _transform(n_px):
81
+ return Compose([
82
+ Resize(n_px, interpolation=BICUBIC),
83
+ CenterCrop(n_px),
84
+ _convert_image_to_rgb,
85
+ ToTensor(),
86
+ Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
87
+ ])
88
+
89
+
90
+ def available_models() -> List[str]:
91
+ """Returns the names of available CLIP modeling"""
92
+ return list(_MODELS.keys())
93
+
94
+
95
+ def get_model_path(name: str, download_root: Optional[str] = None) -> str:
96
+ if name in _MODELS:
97
+ model_path = _download(_MODELS[name], download_root or os.path.expanduser("~/.cache/clip"))
98
+ elif os.path.isfile(name):
99
+ model_path = name
100
+ else:
101
+ raise RuntimeError(f"Model {name} not found; available modeling = {available_models()}")
102
+ return model_path
103
+
104
+
105
+ def load(name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu",
106
+ jit: bool = False, download_root: str = None):
107
+ """Load a CLIP model
108
+
109
+ Parameters
110
+ ----------
111
+ name : str
112
+ A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
113
+
114
+ device : Union[str, torch.device]
115
+ The device to put the loaded model
116
+
117
+ jit : bool
118
+ Whether to load the optimized JIT model or more hackable non-JIT model (default).
119
+
120
+ download_root: str
121
+ path to download the model files; by default, it uses "~/.cache/clip"
122
+
123
+ Returns
124
+ -------
125
+ model : torch.nn.Module
126
+ The CLIP model
127
+
128
+ preprocess : Callable[[PIL.Image], torch.Tensor]
129
+ A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
130
+ """
131
+ if name in _MODELS:
132
+ model_path = _download(_MODELS[name], download_root or os.path.expanduser("~/.cache/clip"))
133
+ elif os.path.isfile(name):
134
+ model_path = name
135
+ else:
136
+ raise RuntimeError(f"Model {name} not found; available modeling = {available_models()}")
137
+
138
+ with open(model_path, 'rb') as opened_file:
139
+ try:
140
+ # loading JIT archive
141
+ model = torch.jit.load(opened_file, map_location=device if jit else "cpu").eval()
142
+ state_dict = None
143
+ except RuntimeError:
144
+ # loading saved state dict
145
+ if jit:
146
+ warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
147
+ jit = False
148
+ state_dict = torch.load(opened_file, map_location="cpu")
149
+
150
+ if not jit:
151
+ model = build_model(state_dict or model.state_dict()).to(device)
152
+ if str(device) == "cpu":
153
+ model.float()
154
+ return model, _transform(model.visual.input_resolution)
155
+
156
+ # patch the device names
157
+ device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
158
+ device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
159
+
160
+ def patch_device(module):
161
+ try:
162
+ graphs = [module.graph] if hasattr(module, "graph") else []
163
+ except RuntimeError:
164
+ graphs = []
165
+
166
+ if hasattr(module, "forward1"):
167
+ graphs.append(module.forward1.graph)
168
+
169
+ for graph in graphs:
170
+ for node in graph.findAllNodes("prim::Constant"):
171
+ if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
172
+ node.copyAttributes(device_node)
173
+
174
+ model.apply(patch_device)
175
+ patch_device(model.encode_image)
176
+ patch_device(model.encode_text)
177
+
178
+ # patch dtype to float32 on CPU
179
+ if str(device) == "cpu":
180
+ float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
181
+ float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
182
+ float_node = float_input.node()
183
+
184
+ def patch_float(module):
185
+ try:
186
+ graphs = [module.graph] if hasattr(module, "graph") else []
187
+ except RuntimeError:
188
+ graphs = []
189
+
190
+ if hasattr(module, "forward1"):
191
+ graphs.append(module.forward1.graph)
192
+
193
+ for graph in graphs:
194
+ for node in graph.findAllNodes("aten::to"):
195
+ inputs = list(node.inputs())
196
+ for i in [1, 2]: # dtype can be the second or third argument to aten::to()
197
+ if inputs[i].node()["value"] == 5:
198
+ inputs[i].node().copyAttributes(float_node)
199
+
200
+ model.apply(patch_float)
201
+ patch_float(model.encode_image)
202
+ patch_float(model.encode_text)
203
+
204
+ model.float()
205
+
206
+ return model, _transform(model.input_resolution.item())
207
+
208
+
209
+ def tokenize(texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False) -> Union[
210
+ torch.IntTensor, torch.LongTensor]:
211
+ """
212
+ Returns the tokenized representation of given input string(s)
213
+
214
+ Parameters
215
+ ----------
216
+ texts : Union[str, List[str]]
217
+ An input string or a list of input strings to tokenize
218
+
219
+ context_length : int
220
+ The context length to use; all CLIP models use 77 as the context length
221
+
222
+ truncate: bool
223
+ Whether to truncate the text in case its encoding is longer than the context length
224
+
225
+ Returns
226
+ -------
227
+ A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length].
228
+ We return LongTensor when torch version is <1.8.0, since older index_select requires indices to be long.
229
+ """
230
+ if isinstance(texts, str):
231
+ texts = [texts]
232
+
233
+ sot_token = _tokenizer.encoder["<|startoftext|>"]
234
+ eot_token = _tokenizer.encoder["<|endoftext|>"]
235
+ all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
236
+ if packaging.version.parse(torch.__version__) < packaging.version.parse("1.8.0"):
237
+ result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
238
+ else:
239
+ result = torch.zeros(len(all_tokens), context_length, dtype=torch.int)
240
+
241
+ for i, tokens in enumerate(all_tokens):
242
+ if len(tokens) > context_length:
243
+ if truncate:
244
+ tokens = tokens[:context_length]
245
+ tokens[-1] = eot_token
246
+ else:
247
+ raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
248
+ result[i, :len(tokens)] = torch.tensor(tokens)
249
+
250
+ return result
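For orientation, a minimal usage sketch of the helpers above (not part of the uploaded sources; the import path and `frame.jpg` are placeholders, and note that `encode_image` of the modified CLIP in the accompanying `model.py` returns a tuple):

```python
# Hedged sketch: load a CLIP backbone via load()/tokenize() and encode one
# image/caption pair. "frame.jpg" is a placeholder path.
import torch
from PIL import Image

from cocap.modules.clip.clip import load, tokenize  # assumed module path

model, preprocess = load("ViT-B/16", device="cpu", jit=False)
image = preprocess(Image.open("frame.jpg")).unsqueeze(0)    # [1, 3, 224, 224]
text = tokenize(["a person riding a bike"], truncate=True)  # [1, 77]

with torch.no_grad():
    image_feature = model.encode_image(image)[0]  # first element is the projected CLS feature
    text_feature = model.encode_text(text)        # [1, 512] for ViT-B/16
```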
CoCap/cocap/modules/clip/model.py ADDED
@@ -0,0 +1,500 @@
1
+ # For visualizing the attention map, we adopted some changes from
2
+ # https://github.com/ricardodeazambuja/CLIP/blob/attn_weights/clip/model.py
3
+
4
+
5
+ import logging
6
+ from collections import OrderedDict
7
+ from typing import Tuple, Union
8
+
9
+ import einops
10
+ import numpy as np
11
+ import torch
12
+ import torch.nn.functional as F
13
+ from torch import nn
14
+
15
+ logger = logging.getLogger(__name__)
16
+
17
+
18
+ class Bottleneck(nn.Module):
19
+ expansion = 4
20
+
21
+ def __init__(self, inplanes, planes, stride=1):
22
+ super().__init__()
23
+
24
+ # all conv modules have stride 1. an avgpool is performed after the second convolution when stride > 1
25
+ self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
26
+ self.bn1 = nn.BatchNorm2d(planes)
27
+ self.relu1 = nn.ReLU(inplace=True)
28
+
29
+ self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
30
+ self.bn2 = nn.BatchNorm2d(planes)
31
+ self.relu2 = nn.ReLU(inplace=True)
32
+
33
+ self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
34
+
35
+ self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
36
+ self.bn3 = nn.BatchNorm2d(planes * self.expansion)
37
+ self.relu3 = nn.ReLU(inplace=True)
38
+
39
+ self.downsample = None
40
+ self.stride = stride
41
+
42
+ if stride > 1 or inplanes != planes * Bottleneck.expansion:
43
+ # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
44
+ self.downsample = nn.Sequential(OrderedDict([
45
+ ("-1", nn.AvgPool2d(stride)),
46
+ ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
47
+ ("1", nn.BatchNorm2d(planes * self.expansion))
48
+ ]))
49
+
50
+ def forward(self, x: torch.Tensor):
51
+ identity = x
52
+
53
+ out = self.relu1(self.bn1(self.conv1(x)))
54
+ out = self.relu2(self.bn2(self.conv2(out)))
55
+ out = self.avgpool(out)
56
+ out = self.bn3(self.conv3(out))
57
+
58
+ if self.downsample is not None:
59
+ identity = self.downsample(x)
60
+
61
+ out += identity
62
+ out = self.relu3(out)
63
+ return out
64
+
65
+
66
+ class AttentionPool2d(nn.Module):
67
+ def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
68
+ super().__init__()
69
+ self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
70
+ self.k_proj = nn.Linear(embed_dim, embed_dim)
71
+ self.q_proj = nn.Linear(embed_dim, embed_dim)
72
+ self.v_proj = nn.Linear(embed_dim, embed_dim)
73
+ self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
74
+ self.num_heads = num_heads
75
+
76
+ def forward(self, x):
77
+ x = x.flatten(start_dim=2).permute(2, 0, 1) # NCHW -> (HW)NC
78
+ x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
79
+ x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
80
+ x, _ = F.multi_head_attention_forward(
81
+ query=x[:1], key=x, value=x,
82
+ embed_dim_to_check=x.shape[-1],
83
+ num_heads=self.num_heads,
84
+ q_proj_weight=self.q_proj.weight,
85
+ k_proj_weight=self.k_proj.weight,
86
+ v_proj_weight=self.v_proj.weight,
87
+ in_proj_weight=None,
88
+ in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
89
+ bias_k=None,
90
+ bias_v=None,
91
+ add_zero_attn=False,
92
+ dropout_p=0,
93
+ out_proj_weight=self.c_proj.weight,
94
+ out_proj_bias=self.c_proj.bias,
95
+ use_separate_proj_weight=True,
96
+ training=self.training,
97
+ need_weights=False
98
+ )
99
+ return x.squeeze(0)
100
+
101
+
102
+ class ModifiedResNet(nn.Module):
103
+ """
104
+ A ResNet class that is similar to torchvision's but contains the following changes:
105
+ - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
106
+ - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
107
+ - The final pooling layer is a QKV attention instead of an average pool
108
+ """
109
+
110
+ def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
111
+ super().__init__()
112
+ self.output_dim = output_dim
113
+ self.input_resolution = input_resolution
114
+
115
+ # the 3-layer stem
116
+ self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
117
+ self.bn1 = nn.BatchNorm2d(width // 2)
118
+ self.relu1 = nn.ReLU(inplace=True)
119
+ self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
120
+ self.bn2 = nn.BatchNorm2d(width // 2)
121
+ self.relu2 = nn.ReLU(inplace=True)
122
+ self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
123
+ self.bn3 = nn.BatchNorm2d(width)
124
+ self.relu3 = nn.ReLU(inplace=True)
125
+ self.avgpool = nn.AvgPool2d(2)
126
+
127
+ # residual modules
128
+ self._inplanes = width # this is a *mutable* variable used during construction
129
+ self.layer1 = self._make_layer(width, layers[0])
130
+ self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
131
+ self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
132
+ self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
133
+
134
+ embed_dim = width * 32 # the ResNet feature dimension
135
+ self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)
136
+
137
+ def _make_layer(self, planes, blocks, stride=1):
138
+ layers = [Bottleneck(self._inplanes, planes, stride)]
139
+
140
+ self._inplanes = planes * Bottleneck.expansion
141
+ for _ in range(1, blocks):
142
+ layers.append(Bottleneck(self._inplanes, planes))
143
+
144
+ return nn.Sequential(*layers)
145
+
146
+ def forward(self, x):
147
+ def stem(x):
148
+ x = self.relu1(self.bn1(self.conv1(x)))
149
+ x = self.relu2(self.bn2(self.conv2(x)))
150
+ x = self.relu3(self.bn3(self.conv3(x)))
151
+ x = self.avgpool(x)
152
+ return x
153
+
154
+ x = x.type(self.conv1.weight.dtype)
155
+ x = stem(x)
156
+ x = self.layer1(x)
157
+ x = self.layer2(x)
158
+ x = self.layer3(x)
159
+ x = self.layer4(x)
160
+ x = self.attnpool(x)
161
+
162
+ return x
163
+
164
+
165
+ class LayerNorm(nn.LayerNorm):
166
+ """Subclass torch's LayerNorm to handle fp16."""
167
+
168
+ def forward(self, x: torch.Tensor):
169
+ orig_type = x.dtype
170
+ ret = super().forward(x.type(torch.float32))
171
+ return ret.type(orig_type)
172
+
173
+
174
+ class QuickGELU(nn.Module):
175
+ def forward(self, x: torch.Tensor):
176
+ return x * torch.sigmoid(1.702 * x)
177
+
178
+
179
+ class ResidualAttentionBlock(nn.Module):
180
+ def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
181
+ super().__init__()
182
+
183
+ self.attn = nn.MultiheadAttention(d_model, n_head)
184
+ self.ln_1 = LayerNorm(d_model)
185
+ self.mlp = nn.Sequential(OrderedDict([
186
+ ("c_fc", nn.Linear(d_model, d_model * 4)),
187
+ ("gelu", QuickGELU()),
188
+ ("c_proj", nn.Linear(d_model * 4, d_model))
189
+ ]))
190
+ self.ln_2 = LayerNorm(d_model)
191
+ self.attn_mask = attn_mask
192
+
193
+ def attention(self, x: torch.Tensor, padding_mask: torch.Tensor = None):
194
+ self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
195
+ return self.attn(x, x, x, need_weights=True, attn_mask=self.attn_mask, key_padding_mask=padding_mask,
196
+ average_attn_weights=False)
197
+
198
+ def forward(self, x: torch.Tensor):
199
+ attention_res = self.attention(self.ln_1(x))
200
+ x, weights = x + attention_res[0], attention_res[1]
201
+ x = x + self.mlp(self.ln_2(x))
202
+ return x, weights
203
+
204
+
205
+ class Transformer(nn.Module):
206
+ def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
207
+ super().__init__()
208
+ self.width = width
209
+ self.layers = layers
210
+ self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])
211
+
212
+ def forward(self, x: torch.Tensor):
213
+ weights_all_blocks = []
214
+
215
+ # Go through all the blocks (modules)
216
+ for block in self.resblocks:
217
+ x, weight = block(x)
218
+ weights_all_blocks.append(weight)
219
+
220
+ return x, torch.stack(weights_all_blocks)
221
+
222
+
223
+ class VisionTransformer(nn.Module):
224
+ def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int,
225
+ in_channels: int = 3):
226
+ super().__init__()
227
+ self.input_resolution = input_resolution
228
+ self.output_dim = output_dim
229
+ self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=width, kernel_size=patch_size, stride=patch_size,
230
+ bias=False)
231
+
232
+ scale = width ** -0.5
233
+ self.class_embedding = nn.Parameter(scale * torch.randn(width))
234
+ self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
235
+ self.ln_pre = LayerNorm(width)
236
+
237
+ self.transformer = Transformer(width, layers, heads)
238
+
239
+ self.ln_post = LayerNorm(width)
240
+ self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
241
+
242
+ def forward(self, x: torch.Tensor, output_all_features: bool = False, output_attention_map: bool = False):
243
+ x = self.conv1(x) # shape = [*, width, grid, grid]
244
+ grid = x.size(2)
245
+ x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
246
+ x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
247
+ x = torch.cat(
248
+ [self.class_embedding.to(x.dtype) +
249
+ torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x],
250
+ dim=1
251
+ ) # shape = [*, grid ** 2 + 1, width]
252
+ x = x + self.positional_embedding.to(x.dtype)
253
+ x = self.ln_pre(x)
254
+
255
+ x = x.permute(1, 0, 2) # NLD -> LND
256
+ x, attn = self.transformer(x)
257
+ x = x.permute(1, 0, 2) # LND -> NLD
258
+
259
+ cls_feature = self.ln_post(x[:, 0, :]) @ self.proj
260
+
261
+ outputs = (cls_feature,)
262
+ if output_all_features:
263
+ # cls token is not included
264
+ outputs += (x[:, 1:, :],)
265
+ if output_attention_map:
266
+ # attention_map: n_layers, batch_size, n_heads, h, w
267
+ outputs += (einops.rearrange(attn[:, :, :, 0, 1:],
268
+ "n_layers b n_heads (h w)->n_layers b n_heads h w", h=grid, w=grid),)
269
+ return outputs
270
+
271
+
272
+ class CrossResidualAttentionBlock(ResidualAttentionBlock):
273
+ """modified version of ResidualAttentionBlock to support the encoder-decoder attention between I-frame tokens and
274
+ motion vector/residual"""
275
+
276
+ def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None,
277
+ enc_dec_attn_mask: torch.Tensor = None):
278
+ super().__init__(d_model=d_model, n_head=n_head, attn_mask=attn_mask)
279
+ self.attn2 = nn.MultiheadAttention(d_model, n_head)
280
+ self.ln_3 = LayerNorm(d_model)
281
+ self.ln_4 = LayerNorm(d_model)
282
+ self.enc_dec_attn_mask = enc_dec_attn_mask
283
+
284
+ def enc_dec_attention(self, highway: torch.Tensor, iframe: torch.Tensor):
285
+ self.enc_dec_attn_mask = self.enc_dec_attn_mask.to(dtype=highway.dtype,
286
+ device=highway.device) if self.enc_dec_attn_mask is not None else None
287
+ return self.attn2(highway, iframe, iframe, need_weights=False, attn_mask=self.enc_dec_attn_mask)[0]
288
+
289
+ def forward(self, x: [torch.Tensor, torch.Tensor, torch.LongTensor]):
290
+ highway, iframe, self_mask = x
291
+ # self-attention
292
+ out, attn = self.attention(self.ln_1(highway), padding_mask=self_mask)
293
+ x = highway + out
294
+ # enc-dec attention
295
+ if iframe is not None:
296
+ x = x + self.enc_dec_attention(self.ln_3(x), self.ln_4(iframe))
297
+ # mlp
298
+ x = x + self.mlp(self.ln_2(x))
299
+ return [x, iframe, self_mask]
300
+
301
+
302
+ class CLIP(nn.Module):
303
+ def __init__(self,
304
+ embed_dim: int,
305
+ # vision
306
+ image_resolution: int,
307
+ vision_layers: Union[Tuple[int, int, int, int], int],
308
+ vision_width: int,
309
+ vision_patch_size: int,
310
+ # text
311
+ context_length: int,
312
+ vocab_size: int,
313
+ transformer_width: int,
314
+ transformer_heads: int,
315
+ transformer_layers: int
316
+ ):
317
+ super().__init__()
318
+
319
+ self.context_length = context_length
320
+
321
+ if isinstance(vision_layers, (tuple, list)):
322
+ vision_heads = vision_width * 32 // 64
323
+ self.visual = ModifiedResNet(
324
+ layers=vision_layers,
325
+ output_dim=embed_dim,
326
+ heads=vision_heads,
327
+ input_resolution=image_resolution,
328
+ width=vision_width
329
+ )
330
+ else:
331
+ vision_heads = vision_width // 64
332
+ self.visual = VisionTransformer(
333
+ input_resolution=image_resolution,
334
+ patch_size=vision_patch_size,
335
+ width=vision_width,
336
+ layers=vision_layers,
337
+ heads=vision_heads,
338
+ output_dim=embed_dim
339
+ )
340
+
341
+ self.transformer = Transformer(
342
+ width=transformer_width,
343
+ layers=transformer_layers,
344
+ heads=transformer_heads,
345
+ attn_mask=self.build_attention_mask()
346
+ )
347
+
348
+ self.vocab_size = vocab_size
349
+ self.token_embedding = nn.Embedding(vocab_size, transformer_width)
350
+ self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
351
+ self.ln_final = LayerNorm(transformer_width)
352
+
353
+ self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
354
+ self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
355
+
356
+ self.initialize_parameters()
357
+
358
+ def initialize_parameters(self):
359
+ nn.init.normal_(self.token_embedding.weight, std=0.02)
360
+ nn.init.normal_(self.positional_embedding, std=0.01)
361
+
362
+ if isinstance(self.visual, ModifiedResNet):
363
+ if self.visual.attnpool is not None:
364
+ std = self.visual.attnpool.c_proj.in_features ** -0.5
365
+ nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
366
+ nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
367
+ nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
368
+ nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)
369
+
370
+ for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]:
371
+ for name, param in resnet_block.named_parameters():
372
+ if name.endswith("bn3.weight"):
373
+ nn.init.zeros_(param)
374
+
375
+ proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
376
+ attn_std = self.transformer.width ** -0.5
377
+ fc_std = (2 * self.transformer.width) ** -0.5
378
+ for block in self.transformer.resblocks:
379
+ nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
380
+ nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
381
+ nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
382
+ nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
383
+
384
+ if self.text_projection is not None:
385
+ nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
386
+
387
+ def build_attention_mask(self):
388
+ # lazily create causal attention mask, with full attention between the vision tokens
389
+ # pytorch uses additive attention mask; fill with -inf
390
+ mask = torch.empty(self.context_length, self.context_length)
391
+ mask.fill_(float("-inf"))
392
+ mask.triu_(1) # zero out the lower diagonal
393
+ return mask
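For a quick illustration of the additive mask built here (a hypothetical context length of 4; the actual models use 77):

```python
# Illustration only: row i may attend to positions <= i; the -inf entries
# vanish after the softmax inside the attention.
import torch

mask = torch.empty(4, 4)
mask.fill_(float("-inf"))
mask.triu_(1)
# tensor([[0., -inf, -inf, -inf],
#         [0.,   0., -inf, -inf],
#         [0.,   0.,   0., -inf],
#         [0.,   0.,   0.,   0.]])
```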
394
+
395
+ @property
396
+ def dtype(self):
397
+ return self.visual.conv1.weight.dtype
398
+
399
+ def encode_image(self, image, output_all_features=False, output_attention_map=False):
400
+ return self.visual(image.type(self.dtype), output_all_features, output_attention_map)
401
+
402
+ def encode_text(self, text, output_all_features=False):
403
+ x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model]
404
+
405
+ x = x + self.positional_embedding.type(self.dtype)
406
+ x = x.permute(1, 0, 2) # NLD -> LND
407
+ x, _ = self.transformer(x)
408
+ x = x.permute(1, 0, 2) # LND -> NLD
409
+ x = self.ln_final(x).type(self.dtype)
410
+
411
+ # x.shape = [batch_size, n_ctx, transformer.width]
412
+ if output_all_features:
413
+ return x
414
+ else:
415
+ # take features from the eot embedding (eot_token is the highest number in each sequence)
416
+ x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
417
+
418
+ return x
419
+
420
+ def forward(self, image, text):
421
+ image_features = self.encode_image(image)
422
+ text_features = self.encode_text(text)
423
+
424
+ # normalized features
425
+ image_features = image_features / image_features.norm(dim=1, keepdim=True)
426
+ text_features = text_features / text_features.norm(dim=1, keepdim=True)
427
+
428
+ # cosine similarity as logits
429
+ logit_scale = self.logit_scale.exp()
430
+ logits_per_image = logit_scale * image_features @ text_features.t()
431
+ logits_per_text = logits_per_image.t()
432
+
433
+ # shape = [global_batch_size, global_batch_size]
434
+ return logits_per_image, logits_per_text
435
+
436
+
437
+ def convert_weights(model: nn.Module):
438
+ """Convert applicable model parameters to fp16"""
439
+
440
+ def _convert_weights_to_fp16(l):
441
+ if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
442
+ l.weight.data = l.weight.data.half()
443
+ if l.bias is not None:
444
+ l.bias.data = l.bias.data.half()
445
+
446
+ if isinstance(l, nn.MultiheadAttention):
447
+ for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
448
+ tensor = getattr(l, attr)
449
+ if tensor is not None:
450
+ tensor.data = tensor.data.half()
451
+
452
+ for name in ["text_projection", "proj"]:
453
+ if hasattr(l, name):
454
+ attr = getattr(l, name)
455
+ if attr is not None:
456
+ attr.data = attr.data.half()
457
+
458
+ model.apply(_convert_weights_to_fp16)
459
+
460
+
461
+ def build_model(state_dict: dict):
462
+ vit = "visual.proj" in state_dict
463
+
464
+ if vit:
465
+ vision_width = state_dict["visual.conv1.weight"].shape[0]
466
+ vision_layers = len(
467
+ [k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
468
+ vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
469
+ grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
470
+ image_resolution = vision_patch_size * grid_size
471
+ else:
472
+ counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in
473
+ [1, 2, 3, 4]]
474
+ vision_layers = tuple(counts)
475
+ vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
476
+ output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
477
+ vision_patch_size = None
478
+ assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
479
+ image_resolution = output_width * 32
480
+
481
+ embed_dim = state_dict["text_projection"].shape[1]
482
+ context_length = state_dict["positional_embedding"].shape[0]
483
+ vocab_size = state_dict["token_embedding.weight"].shape[0]
484
+ transformer_width = state_dict["ln_final.weight"].shape[0]
485
+ transformer_heads = transformer_width // 64
486
+ transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith("transformer.resblocks")))
487
+
488
+ model = CLIP(
489
+ embed_dim,
490
+ image_resolution, vision_layers, vision_width, vision_patch_size,
491
+ context_length, vocab_size, transformer_width, transformer_heads, transformer_layers
492
+ )
493
+
494
+ for key in ["input_resolution", "context_length", "vocab_size"]:
495
+ if key in state_dict:
496
+ del state_dict[key]
497
+
498
+ convert_weights(model)
499
+ model.load_state_dict(state_dict)
500
+ return model.eval()
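A minimal sketch of rebuilding this CLIP module from an OpenAI checkpoint (the `ViT-B-16.pt` path is a placeholder for a downloaded JIT archive; `build_model` converts the weights to fp16, so `.float()` is applied for CPU use):

```python
import torch

from cocap.modules.clip.model import build_model  # assumed module path

jit_model = torch.jit.load("ViT-B-16.pt", map_location="cpu")  # placeholder checkpoint
model = build_model(jit_model.state_dict()).float().eval()

images = torch.randn(2, 3, model.visual.input_resolution, model.visual.input_resolution)
tokens = torch.randint(0, model.vocab_size, (2, model.context_length))

with torch.no_grad():
    (image_features,) = model.encode_image(images)  # encode_image returns a tuple here
    text_features = model.encode_text(tokens)       # [2, 512] for ViT-B/16
```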
CoCap/cocap/modules/clip/simple_tokenizer.py ADDED
@@ -0,0 +1,134 @@
1
+ import gzip
2
+ import html
3
+ import os
4
+ from functools import lru_cache
5
+
6
+ import ftfy
7
+ import regex as re
8
+
9
+
10
+ @lru_cache()
11
+ def default_bpe():
12
+ return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
13
+
14
+
15
+ @lru_cache()
16
+ def bytes_to_unicode():
17
+ """
18
+ Returns a list of utf-8 bytes and a corresponding list of unicode strings.
19
+ The reversible bpe codes work on unicode strings.
20
+ This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
21
+ When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
22
+ This is a significant percentage of your normal, say, 32K bpe vocab.
23
+ To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
24
+ And avoids mapping to whitespace/control characters the bpe code barfs on.
25
+ """
26
+ bs = list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
27
+ cs = bs[:]
28
+ n = 0
29
+ for b in range(2 ** 8):
30
+ if b not in bs:
31
+ bs.append(b)
32
+ cs.append(2 ** 8 + n)
33
+ n += 1
34
+ cs = [chr(n) for n in cs]
35
+ return dict(zip(bs, cs))
36
+
37
+
38
+ def get_pairs(word):
39
+ """Return set of symbol pairs in a word.
40
+ Word is represented as tuple of symbols (symbols being variable-length strings).
41
+ """
42
+ pairs = set()
43
+ prev_char = word[0]
44
+ for char in word[1:]:
45
+ pairs.add((prev_char, char))
46
+ prev_char = char
47
+ return pairs
48
+
49
+
50
+ def basic_clean(text):
51
+ text = ftfy.fix_text(text)
52
+ text = html.unescape(html.unescape(text))
53
+ return text.strip()
54
+
55
+
56
+ def whitespace_clean(text):
57
+ text = re.sub(r'\s+', ' ', text)
58
+ text = text.strip()
59
+ return text
60
+
61
+
62
+ class SimpleTokenizer(object):
63
+ def __init__(self, bpe_path: str = default_bpe()):
64
+ self.byte_encoder = bytes_to_unicode()
65
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
66
+ merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
67
+ merges = merges[1:49152 - 256 - 2 + 1]
68
+ merges = [tuple(merge.split()) for merge in merges]
69
+ vocab = list(bytes_to_unicode().values())
70
+ vocab = vocab + [v + '</w>' for v in vocab]
71
+ for merge in merges:
72
+ vocab.append(''.join(merge))
73
+ vocab.extend(['<|startoftext|>', '<|endoftext|>'])
74
+ self.encoder = dict(zip(vocab, range(len(vocab))))
75
+ self.decoder = {v: k for k, v in self.encoder.items()}
76
+ self.bpe_ranks = dict(zip(merges, range(len(merges))))
77
+ self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
78
+ self.pat = re.compile(
79
+ r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
80
+ re.IGNORECASE)
81
+
82
+ def bpe(self, token):
83
+ if token in self.cache:
84
+ return self.cache[token]
85
+ word = tuple(token[:-1]) + (token[-1] + '</w>',)
86
+ pairs = get_pairs(word)
87
+
88
+ if not pairs:
89
+ return token + '</w>'
90
+
91
+ while True:
92
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
93
+ if bigram not in self.bpe_ranks:
94
+ break
95
+ first, second = bigram
96
+ new_word = []
97
+ i = 0
98
+ while i < len(word):
99
+ try:
100
+ j = word.index(first, i)
101
+ new_word.extend(word[i:j])
102
+ i = j
103
+ except:
104
+ new_word.extend(word[i:])
105
+ break
106
+
107
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
108
+ new_word.append(first + second)
109
+ i += 2
110
+ else:
111
+ new_word.append(word[i])
112
+ i += 1
113
+ new_word = tuple(new_word)
114
+ word = new_word
115
+ if len(word) == 1:
116
+ break
117
+ else:
118
+ pairs = get_pairs(word)
119
+ word = ' '.join(word)
120
+ self.cache[token] = word
121
+ return word
122
+
123
+ def encode(self, text):
124
+ bpe_tokens = []
125
+ text = whitespace_clean(basic_clean(text)).lower()
126
+ for token in re.findall(self.pat, text):
127
+ token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
128
+ bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
129
+ return bpe_tokens
130
+
131
+ def decode(self, tokens):
132
+ text = ''.join([self.decoder[token] for token in tokens])
133
+ text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
134
+ return text
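A small round-trip sketch for the tokenizer (illustrative only; the module path is assumed from the repo layout):

```python
from cocap.modules.clip.simple_tokenizer import SimpleTokenizer  # assumed module path

tokenizer = SimpleTokenizer()
ids = tokenizer.encode("A man is cooking.")  # list of BPE token ids (input is lower-cased internally)
text = tokenizer.decode(ids)                 # recovers roughly "a man is cooking ." (word-end markers become spaces)
```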
CoCap/cocap/modules/compressed_video/__init__.py ADDED
@@ -0,0 +1,8 @@
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 4/24/23
3
+ # @Author : Yaojie Shen
4
+ # @Project : CoCap
5
+ # @File : __init__.py
6
+
7
+ from .compressed_video_captioner import *
8
+ from .compressed_video_transformer import *
CoCap/cocap/modules/compressed_video/compressed_video_captioner.py ADDED
@@ -0,0 +1,228 @@
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 8/6/23
3
+ # @Author : Yaojie Shen
4
+ # @Project : CoCap
5
+ # @File : compressed_video_captioner.py
6
+
7
+ __all__ = [
8
+ "CaptionHead",
9
+ "CompressedVideoCaptioner",
10
+ "caption_head_cfg",
11
+ "caption_head_pretrained_cfg",
12
+ "compressed_video_captioner_cfg",
13
+ "compressed_video_captioner_pretrained_cfg",
14
+ ]
15
+
16
+ import logging
17
+ from typing import *
18
+
19
+ import numpy as np
20
+ import torch
21
+ from easydict import EasyDict as edict
22
+ from hydra_zen import builds
23
+ from torch import Tensor
24
+ from torch import nn
25
+
26
+ from cocap.modules.bert import BertSelfEncoder, BertLMPredictionHead
27
+ from cocap.modules.clip.clip import get_model_path
28
+ from cocap.modules.clip.model import CLIP
29
+ from cocap.modules.compressed_video.compressed_video_transformer import CompressedVideoTransformer, \
30
+ compressed_video_transformer_pretrained_cfg, compressed_video_transformer_cfg
31
+
32
+ logger = logging.getLogger(__name__)
33
+
34
+
35
+ class CaptionHead(nn.Module):
36
+
37
+ def __init__(
38
+ self,
39
+ word_embedding_size: int, visual_feature_size: int,
40
+ max_v_len: int, max_t_len: int, hidden_size: int,
41
+ vocab_size: int, verbose: Optional[Union[int, bool]] = False
42
+ ):
43
+ super(CaptionHead, self).__init__()
44
+ self.model_network = "Self"
45
+ self.cap_config = edict(
46
+ word_vec_size=word_embedding_size,
47
+ max_v_len=max_v_len,
48
+ max_t_len=max_t_len,
49
+ hidden_size=hidden_size,
50
+ video_feature_size=visual_feature_size,
51
+ layer_norm_eps=1e-12, # bert layernorm
52
+ hidden_dropout_prob=0.1, # applies everywhere except attention
53
+ num_hidden_layers=2, # number of transformer modules
54
+ num_attention_heads=8,
55
+ share_wd_cls_weight=False,
56
+ vocab_size=vocab_size,
57
+ BOS_id=vocab_size - 2,
58
+ EOS_id=vocab_size - 1,
59
+ PAD_id=0
60
+ )
61
+ logger.debug("Caption Head Configuration: %s", self.cap_config)
62
+ self.cap_sa_decoder = BertSelfEncoder(self.cap_config)
63
+ self.prediction_head = BertLMPredictionHead(self.cap_config, self.cap_sa_decoder.word_embeddings.weight)
64
+ # debug output cfgs
65
+ if verbose:
66
+ if isinstance(verbose, bool):
67
+ self.log_interval = 1
68
+ else:
69
+ self.log_interval = int(verbose)
70
+ else:
71
+ self.log_interval = float("inf")
72
+ self.step_counter = 1
73
+
74
+ @staticmethod
75
+ @torch.no_grad()
76
+ def probability2text(predict_scores=None):
77
+ predict_ids = predict_scores.max(-1)[1]
78
+ return CaptionHead.ids2text(predict_ids)
79
+
80
+ @staticmethod
81
+ @torch.no_grad()
82
+ def ids2text(gt_ids: Union[np.ndarray, Tensor]):
83
+ from cocap.trainer.cocap_trainer import convert_ids_to_sentence
84
+ if isinstance(gt_ids, np.ndarray) or isinstance(gt_ids, Tensor):
85
+ assert 0 < len(gt_ids.shape) <= 2, f"gt_ids should be a 1 dim or 2 dim array/tensor, got {gt_ids.shape}"
86
+ else:
87
+ raise ValueError("gt_ids should be np.ndarray or Tensor")
88
+ if isinstance(gt_ids, Tensor):
89
+ gt_ids = gt_ids.detach().cpu().numpy()
90
+ if len(gt_ids.shape) == 1:
91
+ return convert_ids_to_sentence(gt_ids.tolist())
92
+ else:
93
+ return [convert_ids_to_sentence(_gt_ids) for _gt_ids in gt_ids.tolist()]
94
+
95
+ def forward(self, visual_output, input_ids, input_mask):
96
+ assert input_ids.size(1) == self.cap_config.max_t_len, f"{input_ids.size(1)} vs {self.cap_config.max_t_len}"
97
+
98
+ input_types = torch.concat(
99
+ [
100
+ torch.full((visual_output["feature_context"].size(0), visual_output["feature_context"].size(1)),
101
+ fill_value=1, dtype=torch.long, device=visual_output["feature_context"].device),
102
+ torch.full((visual_output["feature_action"].size(0), visual_output["feature_action"].size(1)),
103
+ fill_value=0, dtype=torch.long, device=visual_output["feature_action"].device),
104
+ torch.full((input_ids.size(0), input_ids.size(1)),
105
+ fill_value=2, dtype=torch.long, device=input_ids.device)
106
+ ], dim=1
107
+ )
108
+ visual_output = torch.cat([visual_output["feature_context"], visual_output["feature_action"]], dim=1)
109
+ input_mask = torch.concat(
110
+ [
111
+ torch.ones(size=(visual_output.size(0), visual_output.size(1)),
112
+ dtype=torch.long, device=visual_output.device),
113
+ input_mask
114
+ ], dim=1
115
+ )
116
+ hidden = self.cap_sa_decoder.forward(visual_output, input_ids, input_mask, input_types)
117
+ prediction_scores = self.prediction_head(hidden[:, -self.cap_config.max_t_len:])
118
+ if self.step_counter % self.log_interval == 0:
119
+ logger.debug("GT : %s", self.ids2text(input_ids))
120
+ logger.debug("Pred: %s", self.probability2text(prediction_scores))
121
+ self.step_counter += 1
122
+ return prediction_scores
123
+
124
+ @classmethod
125
+ def from_pretrained(
126
+ cls,
127
+ pretrained_clip_name_or_path: str = "ViT-B/16", max_v_len: int = 8 * 2, max_t_len: int = 77,
128
+ verbose: Optional[Union[int, bool]] = False
129
+ ):
130
+ model_path = get_model_path(pretrained_clip_name_or_path, download_root="model_zoo/clip_model")
131
+ pretrained_model: CLIP = torch.jit.load(model_path, map_location="cpu")
132
+ state_dict = pretrained_model.state_dict()
133
+
134
+ embed_dim = state_dict["text_projection"].shape[1]
135
+ vocab_size = state_dict["token_embedding.weight"].shape[0]
136
+ transformer_width = state_dict["ln_final.weight"].shape[0]
137
+
138
+ head = cls(
139
+ word_embedding_size=transformer_width,
140
+ visual_feature_size=embed_dim,
141
+ max_v_len=max_v_len,
142
+ max_t_len=max_t_len,
143
+ hidden_size=embed_dim,
144
+ vocab_size=vocab_size,
145
+ verbose=verbose
146
+ )
147
+ logger.debug(
148
+ "Pretrained embedding parameters: %s",
149
+ [k for k, v in state_dict.items() if k.startswith("token_embedding")]
150
+ )
151
+ pretrained_embedding = {k.lstrip("token_embedding."): v for k, v in state_dict.items()
152
+ if k.startswith("token_embedding")}
153
+ head.cap_sa_decoder.word_embeddings.load_state_dict(pretrained_embedding, strict=True)
154
+ head.prediction_head.decoder.load_state_dict(pretrained_embedding, strict=True)
155
+ assert torch.equal(head.cap_sa_decoder.word_embeddings.weight, head.prediction_head.decoder.weight)
156
+ return head
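A shape-oriented sketch of what the head consumes and produces (a hypothetical batch of 2 videos with 8 GOPs each; assumes the ViT-B/16 weights are available so the hidden size is 512, and that the `BertSelfEncoder` behaves as configured above):

```python
# Hedged sketch, not a training script: feed dummy visual features and a dummy
# 77-token caption window through the caption head.
import torch

head = CaptionHead.from_pretrained("ViT-B/16", max_v_len=2 * 8, max_t_len=77)

visual_output = {
    "feature_context": torch.randn(2, 8, 512),  # one I-frame CLS feature per GOP
    "feature_action": torch.randn(2, 8, 512),   # one fused motion/residual feature per GOP
}
input_ids = torch.zeros(2, 77, dtype=torch.long)  # BOS/EOS/PAD layout comes from the dataset
input_mask = torch.ones(2, 77, dtype=torch.long)

scores = head(visual_output, input_ids, input_mask)  # [2, 77, vocab_size]
```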
157
+
158
+
159
+ class CompressedVideoCaptioner(nn.Module):
160
+
161
+ def __init__(
162
+ self,
163
+ compressed_video_transformer: CompressedVideoTransformer,
164
+ caption_head: CaptionHead,
165
+ motion_dropout_prob: float = 0.2,
166
+ residual_dropout_prob: float = 0.2,
167
+ ):
168
+ super().__init__()
169
+ self.compressed_video_transformer = compressed_video_transformer
170
+ self.caption_head = caption_head
171
+
172
+ self.dropout_motion = nn.Dropout(motion_dropout_prob)
173
+ self.dropout_residual = nn.Dropout(residual_dropout_prob)
174
+
175
+ def forward(self, inputs: Dict[str, Union[Tensor, Dict[str, Tensor]]]):
176
+ """
177
+
178
+ :param inputs:
179
+ video:
180
+ iframe: batch_size n_gop c h w
181
+ motion_vector: batch_size n_gop n_mv c=4|9 h/4 w/4
182
+ residual: batch_size n_gop n_res c h w
183
+ input_mask_gop: batch_size n_gop
184
+ input_mask_mv: batch_size n_gop n_mv
185
+ :return:
186
+ """
187
+ if "visual_output" not in inputs:
188
+ iframe = inputs["video"]["iframe"]
189
+ motion = inputs["video"]["motion_vector"]
190
+ residual = inputs["video"]["residual"] / 128 - 1 # for saving memory
191
+ bp_type_ids = inputs["video"]["type_ids_mv"]
192
+
193
+ motion = self.dropout_motion(motion)
194
+ residual = self.dropout_residual(residual)
195
+ compressed_visual_features = self.compressed_video_transformer(
196
+ iframe=iframe,
197
+ motion=motion,
198
+ residual=residual,
199
+ bp_type_ids=bp_type_ids
200
+ )
201
+ else:
202
+ # reuse pre-extracted visual features
203
+ compressed_visual_features = inputs["visual_output"]
204
+
205
+ prediction_scores = self.caption_head(
206
+ compressed_visual_features,
207
+ inputs["input_ids"],
208
+ inputs["input_mask"],
209
+ )
210
+ return {"prediction_scores": prediction_scores, "visual_output": compressed_visual_features}
211
+
212
+
213
+ # Build configs for organizing modules with hydra
214
+ caption_head_cfg = builds(CaptionHead, populate_full_signature=True)
215
+ caption_head_pretrained_cfg = builds(CaptionHead.from_pretrained, populate_full_signature=True)
216
+
217
+ compressed_video_captioner_cfg = builds(
218
+ CompressedVideoCaptioner,
219
+ compressed_video_transformer=compressed_video_transformer_cfg,
220
+ caption_head=caption_head_cfg,
221
+ populate_full_signature=True
222
+ )
223
+ compressed_video_captioner_pretrained_cfg = builds(
224
+ CompressedVideoCaptioner,
225
+ compressed_video_transformer=compressed_video_transformer_pretrained_cfg,
226
+ caption_head=caption_head_pretrained_cfg,
227
+ populate_full_signature=True
228
+ )
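A minimal wiring sketch for the full captioner (assumes the CLIP ViT-B/16 weights can be fetched; tensor sizes follow the shape comments in `forward` and the defaults of `from_pretrained`, with a deliberately tiny 2-GOP clip):

```python
import torch

from cocap.modules.compressed_video import (
    CaptionHead, CompressedVideoCaptioner, CompressedVideoTransformer,
)

model = CompressedVideoCaptioner(
    compressed_video_transformer=CompressedVideoTransformer.from_pretrained("ViT-B/16"),
    caption_head=CaptionHead.from_pretrained("ViT-B/16", max_v_len=2 * 2, max_t_len=77),
).eval()

bsz, n_gop, n_bp = 1, 2, 59
inputs = {
    "video": {
        "iframe": torch.randn(bsz, n_gop, 3, 224, 224),
        "motion_vector": torch.randn(bsz, n_gop, n_bp, 4, 56, 56),
        "residual": torch.rand(bsz, n_gop, n_bp, 3, 224, 224) * 255,  # raw 0-255 residuals
        "type_ids_mv": torch.zeros(bsz, n_gop, n_bp, dtype=torch.long),
    },
    "input_ids": torch.zeros(bsz, 77, dtype=torch.long),
    "input_mask": torch.ones(bsz, 77, dtype=torch.long),
}
with torch.no_grad():
    out = model(inputs)
# out["prediction_scores"]: [1, 77, vocab_size]; out["visual_output"] can be cached and reused
```

In the actual pipeline these components are presumably instantiated through the `*_pretrained_cfg` hydra-zen configs defined at the bottom of the file.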
CoCap/cocap/modules/compressed_video/compressed_video_transformer.py ADDED
@@ -0,0 +1,317 @@
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 8/2/23
3
+ # @Author : Yaojie Shen
4
+ # @Project : CoCap
5
+ # @File : compressed_video_transformer.py
6
+
7
+ __all__ = [
8
+ "IFrameEncoder",
9
+ "ActionEncoder",
10
+ "CompressedVideoTransformer",
11
+ "iframe_encoder_cfg",
12
+ "iframe_encoder_pretrained_cfg",
13
+ "action_encoder_cfg",
14
+ "motion_encoder_cfg",
15
+ "compressed_video_transformer_cfg",
16
+ "compressed_video_transformer_pretrained_cfg",
17
+ ]
18
+
19
+ import logging
20
+ from typing import Optional, Dict, Tuple
21
+
22
+ import einops
23
+ import torch
24
+ import torch.nn as nn
25
+ from hydra_zen import builds
26
+
27
+ from cocap.modules.clip.clip import get_model_path
28
+ from cocap.modules.clip.model import VisionTransformer, CrossResidualAttentionBlock, LayerNorm, CLIP
29
+
30
+ logger = logging.getLogger(__name__)
31
+
32
+
33
+ class IFrameEncoder(VisionTransformer):
34
+ def __init__(
35
+ self,
36
+ input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int,
37
+ in_channels: int = 3
38
+ ):
39
+ super().__init__(
40
+ input_resolution=input_resolution, patch_size=patch_size, width=width, layers=layers, heads=heads,
41
+ output_dim=output_dim, in_channels=in_channels
42
+ )
43
+
44
+ scale = width ** -0.5
45
+ self.ln_post_hidden = LayerNorm(width)
46
+ self.proj_hidden = nn.Parameter(scale * torch.randn(width, output_dim))
47
+
48
+ def forward(self, x: torch.Tensor, output_all_features: bool = False, output_attention_map: bool = False):
49
+ x = self.conv1(x) # shape = [*, width, grid, grid]
50
+ grid = x.size(2)
51
+ x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
52
+ x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
53
+ x = torch.cat(
54
+ [self.class_embedding.to(x.dtype) +
55
+ torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x],
56
+ dim=1
57
+ ) # shape = [*, grid ** 2 + 1, width]
58
+ x = x + self.positional_embedding.to(x.dtype)
59
+ x = self.ln_pre(x)
60
+
61
+ x = x.permute(1, 0, 2) # NLD -> LND
62
+ x, attn = self.transformer(x)
63
+ x = x.permute(1, 0, 2) # LND -> NLD
64
+
65
+ cls_feature = self.ln_post(x[:, 0, :]) @ self.proj
66
+
67
+ outputs = (cls_feature,)
68
+ if output_all_features:
69
+ # cls token is not included
70
+ outputs += (self.ln_post_hidden(x[:, 1:, :]) @ self.proj_hidden,)
71
+ if output_attention_map:
72
+ # attention_map: n_layers, batch_size, n_heads, h, w
73
+ outputs += (einops.rearrange(attn[:, :, :, 0, 1:],
74
+ "n_layers b n_heads (h w)->n_layers b n_heads h w", h=grid, w=grid),)
75
+ return outputs
76
+
77
+ @classmethod
78
+ def from_pretrained(cls, pretrained_clip_name_or_path: str) -> Tuple["IFrameEncoder", int, int, int]:
79
+ """
80
+ Load from pretrained CLIP model
81
+ :param pretrained_clip_name_or_path: the name of pretrained CLIP model
82
+ :return: IFrameEncoder, image_resolution, vision_width, embed_dim
83
+ """
84
+ model_path = get_model_path(pretrained_clip_name_or_path)
85
+ pretrained_model: CLIP = torch.jit.load(model_path, map_location="cpu")
86
+ state_dict = pretrained_model.state_dict()
87
+
88
+ vision_width: int = state_dict["visual.conv1.weight"].shape[0]
89
+ vision_layers: int = len([k for k in state_dict.keys()
90
+ if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
91
+ embed_dim: int = state_dict["text_projection"].shape[1]
92
+ vision_patch_size: int = state_dict["visual.conv1.weight"].shape[-1]
93
+ grid_size: int = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
94
+ image_resolution: int = vision_patch_size * grid_size
95
+ vision_heads = vision_width // 64
96
+
97
+ rgb_encoder = cls(
98
+ input_resolution=image_resolution,
99
+ patch_size=vision_patch_size,
100
+ width=vision_width, layers=vision_layers, heads=vision_heads,
101
+ output_dim=embed_dim
102
+ )
103
+ visual_state_dict = pretrained_model.visual.state_dict()
104
+ # manually build the state dict to make sure pretrained weights are loaded exactly
105
+ visual_state_dict.update({k: v for k, v in rgb_encoder.state_dict().items() if k.startswith("ln_post_hidden")})
106
+ visual_state_dict.update({k: v for k, v in rgb_encoder.state_dict().items() if k.startswith("proj_hidden")})
107
+ rgb_encoder.load_state_dict(visual_state_dict, strict=True)
108
+
109
+ return rgb_encoder, image_resolution, vision_width, embed_dim
110
+
111
+
112
+ class ActionEncoder(nn.Module):
113
+ def __init__(self, width: int, layers: int, heads: int, n_bp: int, n_bp_type: int, attn_mask: torch.Tensor = None):
114
+ super().__init__()
115
+ self.width = width
116
+ self.layers = layers
117
+ self.resblocks = nn.Sequential(*[CrossResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])
118
+
119
+ self.positional_embedding = nn.Embedding(n_bp, width)
120
+ self.bp_type_embedding = nn.Embedding(n_bp_type, width)
121
+
122
+ self.ln_post = LayerNorm(width)
123
+
124
+ def forward(
125
+ self,
126
+ feature_bp: torch.FloatTensor,
127
+ bp_type_ids: torch.LongTensor,
128
+ feature_ctx: torch.FloatTensor = None,
129
+ self_mask: torch.Tensor = None
130
+ ):
131
+ """
132
+ Fuse I-frames, motion vectors and residuals.
133
+ The feature dimension of feature_bp and feature_ctx are the same.
134
+ :param feature_bp: (bsz n_gop) n_bp c
135
+ :param bp_type_ids: (bsz n_gop) n_bp
136
+ :param feature_ctx: (bsz n_gop) (h w) c
137
+ :param self_mask: attention mask
138
+ :return:
139
+ """
140
+ assert feature_bp.size(1) == self.positional_embedding.num_embeddings
141
+ bsz = feature_bp.size(0)
142
+
143
+ positional_embedding = self.positional_embedding(
144
+ torch.LongTensor(list(range(feature_bp.size(1)))).to(feature_bp.device).unsqueeze(0).repeat(bsz, 1)
145
+ )
146
+ bp_type_embedding = self.bp_type_embedding(bp_type_ids)
147
+ feature_bp = feature_bp + positional_embedding + bp_type_embedding
148
+
149
+ feature_bp, _, _ = self.resblocks([feature_bp.permute(1, 0, 2), feature_ctx.permute(1, 0, 2), self_mask])
150
+ feature_bp = torch.mean(feature_bp.permute(1, 0, 2), dim=1) # pooling
151
+ return self.ln_post(feature_bp)
152
+
153
+
154
+ class CompressedVideoTransformer(nn.Module):
155
+ def __init__(
156
+ self,
157
+ rgb_encoder: nn.Module,
158
+ motion_encoder: Optional[nn.Module],
159
+ residual_encoder: Optional[nn.Module],
160
+ action_encoder: Optional[nn.Module],
161
+ output_dim: int,
162
+ ):
163
+ """
164
+ Encode visual feature from video compressed domain
165
+ :param rgb_encoder:
166
+ :param motion_encoder:
167
+ :param residual_encoder:
168
+ :param action_encoder: ActionEncoder used to fuse the rgb, motion and residual features
169
+ :param output_dim: width of output visual feature
170
+ """
171
+ super().__init__()
172
+ assert motion_encoder is not None or residual_encoder is not None
173
+ self.rgb_encoder = rgb_encoder
174
+ self.motion_encoder = motion_encoder
175
+ self.residual_encoder = residual_encoder
176
+ self.action_encoder = action_encoder
177
+ self.output_dim = output_dim
178
+
179
+ def forward(
180
+ self,
181
+ iframe: torch.FloatTensor,
182
+ motion: torch.FloatTensor,
183
+ residual: torch.FloatTensor,
184
+ bp_type_ids: torch.LongTensor
185
+ ) -> Dict[str, torch.Tensor]:
186
+ """
187
+
188
+ :param iframe: bsz n_gop c h w
189
+ :param motion: bsz n_gop n_bp c_mv h/4 w/4
190
+ :param residual: bsz n_gop n_bp c h w
191
+ :param bp_type_ids: bsz n_gop n_bp
192
+ """
193
+ # a long list of dimension check
194
+ assert (len(iframe.shape) == 5 and len(motion.shape) == 6 and len(residual.shape) == 6 and
195
+ len(bp_type_ids.shape) == 3)
196
+ assert iframe.size(0) == motion.size(0) == residual.size(0) == bp_type_ids.size(0), "batch size should be equal"
197
+ assert iframe.size(1) == motion.size(1) == residual.size(1) == bp_type_ids.size(1), "n_gop should be equal"
198
+ assert motion.size(2) == residual.size(2) == bp_type_ids.size(2), "n_mv and n_res should be equal"
199
+ assert iframe.size(2) == 3 and motion.size(3) == 4 and residual.size(3) == 3, "channel number is not correct"
200
+ assert iframe.size(3) == residual.size(4) and motion.size(4) == iframe.size(3) // 4, "height is not correct"
201
+ assert iframe.size(4) == residual.size(5) and motion.size(5) == iframe.size(4) // 4, "width is not correct"
202
+
203
+ _bsz, n_gop, n_bp = iframe.size(0), motion.size(1), motion.size(2)
204
+
205
+ # encode iframe in batches
206
+ f_ctx_cls, f_ctx_all_hidden, iframe_attn = self.rgb_encoder(
207
+ einops.rearrange(iframe, "bsz n_gop c h w->(bsz n_gop) c h w"),
208
+ output_all_features=True, output_attention_map=True
209
+ )
210
+ f_ctx_cls = einops.rearrange(f_ctx_cls, "(bsz n_gop) c->bsz n_gop c", bsz=_bsz)
211
+ f_ctx_all_hidden = einops.rearrange(f_ctx_all_hidden, "(bsz n_gop) hw c->bsz n_gop hw c", bsz=_bsz)
212
+ iframe_attn = einops.rearrange(
213
+ iframe_attn, "n_layers (bsz n_gop) n_heads h w->n_layers bsz n_gop n_heads h w",
214
+ bsz=_bsz
215
+ )
216
+ # encode motion in batches
217
+ mv_cls, mv_attn = self.motion_encoder(
218
+ einops.rearrange(motion, "bsz n_gop n_bp c_mv h_4 w_4->(bsz n_gop n_bp) c_mv h_4 w_4"),
219
+ output_all_features=False, output_attention_map=True
220
+ )
221
+ mv_cls = einops.rearrange(mv_cls, "(bsz n_gop n_bp) c->bsz n_gop n_bp c",
222
+ bsz=_bsz, n_gop=n_gop, n_bp=n_bp)
223
+ mv_attn = einops.rearrange(
224
+ mv_attn, "n_layers (bsz n_gop n_bp) n_heads h_4 w_4->n_layers bsz n_gop n_bp n_heads h_4 w_4",
225
+ bsz=_bsz, n_gop=n_gop, n_bp=n_bp
226
+ )
227
+ # encode residual in batches
228
+ res_cls, res_attn = self.residual_encoder(
229
+ einops.rearrange(residual, "bsz n_gop n_bp c h w->(bsz n_gop n_bp) c h w"),
230
+ output_all_features=False, output_attention_map=True
231
+ )
232
+ res_cls = einops.rearrange(res_cls, "(bsz n_gop n_bp) c->bsz n_gop n_bp c",
233
+ bsz=_bsz, n_gop=n_gop, n_bp=n_bp)
234
+ res_attn = einops.rearrange(
235
+ res_attn, "n_layers (bsz n_gop n_bp) n_heads h w->n_layers bsz n_gop n_bp n_heads h w",
236
+ bsz=_bsz, n_gop=n_gop, n_bp=n_bp
237
+ )
238
+
239
+ # fuse rgb, mv and res features through action encoder
240
+ f_bp = mv_cls + res_cls
241
+ f_act = self.action_encoder( # squeeze n_gop into batch before forwarding
242
+ einops.rearrange(f_bp, "bsz n_gop n_bp c->(bsz n_gop) n_bp c"),
243
+ einops.rearrange(bp_type_ids, "bsz n_gop n_bp->(bsz n_gop) n_bp"),
244
+ einops.rearrange(f_ctx_all_hidden, "bsz n_gop hw c->(bsz n_gop) hw c")
245
+ )
246
+ f_act = einops.rearrange(f_act, "(bsz n_gop) c->bsz n_gop c", bsz=_bsz, n_gop=n_gop)
247
+
248
+ return {
249
+ "feature_context": f_ctx_cls,
250
+ "feature_action": f_act,
251
+ "iframe_attention_map": iframe_attn,
252
+ "motion_vector_attention_map": mv_attn,
253
+ "residual_attention_map": res_attn,
254
+ }
255
+
256
+ @classmethod
257
+ def from_pretrained(
258
+ cls,
259
+ # rgb encoder cfgs
260
+ pretrained_clip_name_or_path: str = "ViT-B/16",
261
+ # motion encoder cfgs
262
+ motion_patch_size: int = 8, motion_layers: int = 2, motion_heads: int = 8,
263
+ # residual encoder cfgs
264
+ residual_patch_size: int = 64, residual_layers: int = 2, residual_heads: int = 8,
265
+ # action encoder cfgs
266
+ action_layers: int = 1, action_heads: int = 8, n_bp: int = 59
267
+ ):
268
+ rgb_encoder, image_resolution, vision_width, embed_dim = IFrameEncoder.from_pretrained(
269
+ pretrained_clip_name_or_path
270
+ )
271
+
272
+ motion_encoder = VisionTransformer(
273
+ input_resolution=image_resolution // 4,
274
+ patch_size=motion_patch_size,
275
+ width=vision_width // 4, layers=motion_layers, heads=motion_heads,
276
+ output_dim=embed_dim,
277
+ in_channels=4
278
+ )
279
+ residual_encoder = VisionTransformer(
280
+ input_resolution=image_resolution,
281
+ patch_size=residual_patch_size,
282
+ width=vision_width, layers=residual_layers, heads=residual_heads,
283
+ output_dim=embed_dim,
284
+ in_channels=3
285
+ )
286
+ action_encoder = ActionEncoder(
287
+ width=embed_dim, layers=action_layers, heads=action_heads, n_bp=n_bp, n_bp_type=2
288
+ )
289
+ return cls(
290
+ rgb_encoder=rgb_encoder,
291
+ motion_encoder=motion_encoder,
292
+ residual_encoder=residual_encoder,
293
+ action_encoder=action_encoder,
294
+ output_dim=embed_dim
295
+ )
296
+
297
+
298
+ # Build configs for organizing modules with hydra
299
+ iframe_encoder_cfg = builds(IFrameEncoder, populate_full_signature=True)
300
+ iframe_encoder_pretrained_cfg = builds(IFrameEncoder.from_pretrained, populate_full_signature=True)
301
+
302
+ action_encoder_cfg = builds(ActionEncoder, populate_full_signature=True)
303
+
304
+ motion_encoder_cfg = residual_encoder_cfg = builds(VisionTransformer, populate_full_signature=True)
305
+
306
+ compressed_video_transformer_cfg = builds(
307
+ CompressedVideoTransformer,
308
+ rgb_encoder=iframe_encoder_cfg,
309
+ motion_encoder=motion_encoder_cfg,
310
+ residual_encoder=residual_encoder_cfg,
311
+ action_encoder=action_encoder_cfg,
312
+ populate_full_signature=True
313
+ )
314
+ compressed_video_transformer_pretrained_cfg = builds(
315
+ CompressedVideoTransformer.from_pretrained,
316
+ populate_full_signature=True
317
+ )
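For the default "ViT-B/16" backbone, the sizes derived by `from_pretrained` above work out as follows (illustrative arithmetic only):

```python
# Derived configuration for pretrained_clip_name_or_path="ViT-B/16":
image_resolution, vision_width, embed_dim = 224, 768, 512

motion_resolution = image_resolution // 4        # 56x56 inputs with 4 channels
motion_width = vision_width // 4                 # 192
mv_tokens = (motion_resolution // 8) ** 2 + 1    # 50 tokens incl. CLS (8-px patches)
res_tokens = (image_resolution // 64) ** 2 + 1   # 10 tokens incl. CLS (64-px patches)
n_bp = 59                                        # predicted pictures per GOP (default)
```

So the motion branch runs at a quarter of the resolution and width of the I-frame branch, while the residual branch keeps full resolution but uses a coarse 64-pixel patch size.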
CoCap/cocap/utils/__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 2022/11/12 22:30
3
+ # @Author : Yaojie Shen
4
+ # @Project : CoCap
5
+ # @File : __init__.py
CoCap/cocap/utils/checkpoint.py ADDED
@@ -0,0 +1,132 @@
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 2022/11/13 00:25
3
+ # @Author : Yaojie Shen
4
+ # @Project : CoCap
5
+ # @File : checkpoint.py
6
+
7
+
8
+ import logging
9
+ import os
10
+ from collections import OrderedDict
11
+ from typing import *
12
+
13
+ import torch
14
+
15
+ logger = logging.getLogger(__name__)
16
+
17
+
18
+ def auto_resume(ckpt_folder):
19
+ try:
20
+ ckpt_files = [ckpt for ckpt in os.listdir(ckpt_folder) if ckpt.endswith(".pth")]
21
+ except FileNotFoundError:
22
+ ckpt_files = []
23
+ if len(ckpt_files) > 0:
24
+ return max([os.path.join(ckpt_folder, file) for file in ckpt_files], key=os.path.getmtime)
25
+ else:
26
+ return None
27
+
28
+
29
+ def save_checkpoint(ckpt_folder, epoch, model, optimizer, scheduler, config, prefix="checkpoint"):
30
+ if hasattr(model, 'module'):
31
+ model = model.module
32
+ stat_dict = {
33
+ "epoch": epoch,
34
+ "model": model.state_dict(),
35
+ "optimizer": optimizer.state_dict(),
36
+ "scheduler": scheduler.state_dict() if scheduler is not None else "",
37
+ "config": config
38
+ }
39
+ ckpt_path = os.path.join(ckpt_folder, f"{prefix}_{epoch}.pth")
40
+ os.makedirs(ckpt_folder, exist_ok=True)
41
+ torch.save(stat_dict, ckpt_path)
42
+ return ckpt_path
43
+
44
+
45
+ def load_checkpoint(ckpt_file, model: torch.nn.Module, optimizer: Union[torch.optim.Optimizer, None], scheduler: Any,
46
+ restart_train=False, rewrite: Tuple[str, str] = None):
47
+ if hasattr(model, 'module'):
48
+ model = model.module
49
+ state_dict = torch.load(ckpt_file, map_location="cpu")
50
+ if rewrite is not None:
51
+ logger.info("rewrite model checkpoint prefix: %s->%s", *rewrite)
52
+ state_dict["model"] = {k.replace(*rewrite) if k.startswith(rewrite[0]) else k: v
53
+ for k, v in state_dict["model"].items()}
54
+ try:
55
+ missing = model.load_state_dict(state_dict["model"], strict=False)
56
+ logger.debug(f"checkpoint key missing: {missing}")
57
+ except RuntimeError:
58
+ print("fail to directly recover from checkpoint, try to match each modules...")
59
+ net_dict = model.state_dict()
60
+ print("find %s modules", len(state_dict["model"].items()))
61
+ missing_keys = [k for k, v in state_dict["model"].items() if k not in net_dict or net_dict[k].shape != v.shape]
62
+ print("missing key: %s", missing_keys)
63
+ state_dict["model"] = {k: v for k, v in state_dict["model"].items() if
64
+ (k in net_dict and net_dict[k].shape == v.shape)}
65
+ print("resume %s modules from checkpoint", len(state_dict["model"].items()))
66
+ net_dict.update(state_dict["model"])
67
+ model.load_state_dict(OrderedDict(net_dict))
68
+
69
+ if not restart_train:
70
+ if optimizer is not None and state_dict["optimizer"]:
71
+ optimizer.load_state_dict(state_dict["optimizer"])
72
+ if scheduler is not None and state_dict["scheduler"]:
73
+ scheduler.load_state_dict(state_dict["scheduler"])
74
+ epoch = state_dict["epoch"]
75
+ else:
76
+ logger.info("restart train, optimizer and scheduler will not be resumed")
77
+ epoch = 0
78
+
79
+ del state_dict
80
+ torch.cuda.empty_cache()
81
+ return epoch # start epoch
82
+
83
+
84
+ def save_model(model_file: str, model: torch.nn.Module):
85
+ if hasattr(model, "module"):
86
+ model = model.module
87
+ torch.save(model.state_dict(), model_file)
88
+
89
+
90
+ def load_model(model_file: str, model: torch.nn.Module, strict=True):
91
+ if hasattr(model, "module"):
92
+ model = model.module
93
+ state_dict = torch.load(model_file, map_location="cpu")
94
+
95
+ missing_keys: List[str] = []
96
+ unexpected_keys: List[str] = []
97
+ error_msgs: List[str] = []
98
+
99
+ # copy state_dict so _load_from_state_dict can modify it
100
+ metadata = getattr(state_dict, '_metadata', None)
101
+ state_dict = state_dict.copy()
102
+ if metadata is not None:
103
+ # mypy isn't aware that "_metadata" exists in state_dict
104
+ state_dict._metadata = metadata # type: ignore[attr-defined]
105
+
106
+ def load(module, prefix=''):
107
+ local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
108
+ module._load_from_state_dict(
109
+ state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
110
+ for name, child in module._modules.items():
111
+ if child is not None:
112
+ load(child, prefix + name + '.')
113
+
114
+ load(model)
115
+ del load
116
+
117
+ if len(missing_keys) > 0:
118
+ logger.info("Weights of {} not initialized from pretrained model: {}"
119
+ .format(model.__class__.__name__, "\n " + "\n ".join(missing_keys)))
120
+ if len(unexpected_keys) > 0:
121
+ logger.info("Weights from pretrained model not used in {}: {}"
122
+ .format(model.__class__.__name__, "\n " + "\n ".join(unexpected_keys)))
123
+ if len(error_msgs) > 0:
124
+ logger.info("Weights from pretrained model cause errors in {}: {}"
125
+ .format(model.__class__.__name__, "\n " + "\n ".join(error_msgs)))
126
+
127
+ if len(missing_keys) == 0 and len(unexpected_keys) == 0 and len(error_msgs) == 0:
128
+ logger.info("All keys loaded successfully for {}".format(model.__class__.__name__))
129
+
130
+ if strict and len(error_msgs) > 0:
131
+ raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
132
+ model.__class__.__name__, "\n\t".join(error_msgs)))
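
A minimal sketch of how these helpers could be wired into a training loop; the model, optimizer, and checkpoint directory below are placeholders rather than names used elsewhere in this repository:

```python
import torch

model = torch.nn.Linear(8, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
ckpt_dir = "outputs/ckpt"  # hypothetical checkpoint folder

latest = auto_resume(ckpt_dir)  # newest *.pth in the folder, or None
start_epoch = load_checkpoint(latest, model, optimizer, scheduler=None) if latest else 0
for epoch in range(start_epoch, 3):
    # ... run one training epoch ...
    save_checkpoint(ckpt_dir, epoch, model, optimizer, scheduler=None, config={})
```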
CoCap/cocap/utils/image.py ADDED
@@ -0,0 +1,24 @@
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 9/5/23
3
+ # @Author : Yaojie Shen
4
+ # @Project : CoCap
5
+ # @File : image.py
6
+
7
+
8
+ from io import BytesIO
9
+
10
+ from PIL import Image
11
+
12
+ __all__ = ['byte_imread', "byte_imwrite"]
13
+
14
+
15
+ def byte_imread(data):
16
+ return Image.open(BytesIO(data))
17
+
18
+
19
+ def byte_imwrite(image, quality=100, subsampling=0):
20
+ image = Image.fromarray(image)
21
+ with BytesIO() as f:
22
+ image.save(f, format="JPEG", quality=quality, subsampling=subsampling)
23
+ data = f.getvalue()
24
+ return data
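
A small usage sketch of the two helpers above; the input array is synthetic:

```python
import numpy as np

frame = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)
jpeg_bytes = byte_imwrite(frame)   # in-memory JPEG encoding via PIL
image = byte_imread(jpeg_bytes)    # back to a PIL.Image.Image
print(image.size)                  # (64, 64)
```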
CoCap/cocap/utils/json.py ADDED
@@ -0,0 +1,28 @@
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 2023/2/18 06:03
3
+ # @Author : Yaojie Shen
4
+ # @Project : CoCap
5
+ # @File : json.py
6
+
7
+ import json
8
+ from typing import Union, List, Dict
9
+
10
+
11
+ def load_json(file_path: str):
12
+ with open(file_path, "r") as f:
13
+ return json.load(f)
14
+
15
+
16
+ def save_json(data: Union[List, Dict], filename: str, save_pretty: bool = False, sort_keys: bool = False):
17
+ class MyEncoder(json.JSONEncoder):
18
+
19
+ def default(self, obj):
20
+ if isinstance(obj, bytes): # bytes->str
21
+ return str(obj, encoding='utf-8')
22
+ return json.JSONEncoder.default(self, obj)
23
+
24
+ with open(filename, "w") as f:
25
+ if save_pretty:
26
+ f.write(json.dumps(data, cls=MyEncoder, indent=4, sort_keys=sort_keys))
27
+ else:
28
+ json.dump(data, f)
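
A short usage sketch; the file name is a placeholder. The custom encoder only takes effect with `save_pretty=True`, where `bytes` values are converted to `str`:

```python
save_json({"video_id": "demo", "caption": b"a man is cooking"}, "demo.json", save_pretty=True)
assert load_json("demo.json")["caption"] == "a man is cooking"
```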
CoCap/cocap/utils/logging.py ADDED
@@ -0,0 +1,90 @@
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 2022/11/12 22:32
3
+ # @Author : Yaojie Shen
4
+ # @Project : CoCap
5
+ # @File : logging.py
6
+
7
+
8
+ import logging
9
+ import os
10
+
11
+ import colorlog
12
+ import torch.distributed as dist
13
+
14
+ level_dict = {
15
+ "critical": logging.CRITICAL,
16
+ "error": logging.ERROR,
17
+ "warning": logging.WARNING,
18
+ "info": logging.INFO,
19
+ "debug": logging.DEBUG,
20
+ "notset": logging.NOTSET
21
+ }
22
+
23
+
24
+ # noinspection SpellCheckingInspection
25
+ def setup_logging(cfg):
26
+ # log file
27
+ if len(str(cfg.LOG.LOGGER_FILE).split(".")) == 2:
28
+ file_name, extension = str(cfg.LOG.LOGGER_FILE).split(".")
29
+ log_file_debug = os.path.join(cfg.LOG.DIR, f"{file_name}_debug.{extension}")
30
+ log_file_info = os.path.join(cfg.LOG.DIR, f"{file_name}_info.{extension}")
31
+ elif len(str(cfg.LOG.LOGGER_FILE).split(".")) == 1:
32
+ file_name = cfg.LOG.LOGGER_FILE
33
+ log_file_debug = os.path.join(cfg.LOG.DIR, f"{file_name}_debug")
34
+ log_file_info = os.path.join(cfg.LOG.DIR, f"{file_name}_info")
35
+ else:
36
+ raise ValueError("cfg.LOG.LOGGER_FILE is invalid: %s", cfg.LOG.LOGGER_FILE)
37
+ logger = logging.getLogger(__name__.split(".")[0])
38
+ logger.setLevel(logging.DEBUG)
39
+ logger.handlers.clear()
40
+ formatter = logging.Formatter(
41
+ f"[%(asctime)s][%(levelname)s]{f'[Rank {dist.get_rank()}]' if dist.is_initialized() else ''} "
42
+ "%(filename)s: %(lineno)3d: %(message)s",
43
+ datefmt="%m/%d %H:%M:%S",
44
+ )
45
+ color_formatter = colorlog.ColoredFormatter(
46
+ f"%(log_color)s%(bold)s%(levelname)-8s%(reset)s"
47
+ f"%(log_color)s[%(asctime)s]"
48
+ f"{f'[Rank {dist.get_rank()}]' if dist.is_initialized() else ''}"
49
+ "[%(filename)s: %(lineno)3d]:%(reset)s "
50
+ "%(message)s",
51
+ datefmt="%m/%d %H:%M:%S",
52
+ )
53
+ # log file
54
+ if os.path.dirname(log_file_debug): # dir name is not empty
55
+ os.makedirs(os.path.dirname(log_file_debug), exist_ok=True)
56
+ # console
57
+ handler_console = logging.StreamHandler()
58
+ assert cfg.LOG.LOGGER_CONSOLE_LEVEL.lower() in level_dict, \
59
+ f"Log level {cfg.LOG.LOGGER_CONSOLE_LEVEL} is invalid"
60
+ handler_console.setLevel(level_dict[cfg.LOG.LOGGER_CONSOLE_LEVEL.lower()])
61
+ handler_console.setFormatter(color_formatter if cfg.LOG.LOGGER_CONSOLE_COLORFUL else formatter)
62
+ logger.addHandler(handler_console)
63
+ # debug level
64
+ handler_debug = logging.FileHandler(log_file_debug, mode="a")
65
+ handler_debug.setLevel(logging.DEBUG)
66
+ handler_debug.setFormatter(formatter)
67
+ logger.addHandler(handler_debug)
68
+ # info level
69
+ handler_info = logging.FileHandler(log_file_info, mode="a")
70
+ handler_info.setLevel(logging.INFO)
71
+ handler_info.setFormatter(formatter)
72
+ logger.addHandler(handler_info)
73
+
74
+ logger.propagate = False
75
+
76
+
77
+ def show_registry():
78
+ from cocap.data.build import DATASET_REGISTRY, COLLATE_FN_REGISTER
79
+ from cocap.modeling.model import MODEL_REGISTRY
80
+ from cocap.modeling.optimizer import OPTIMIZER_REGISTRY
81
+ from cocap.modeling.loss import LOSS_REGISTRY
82
+ from cocap.modeling.meter import METER_REGISTRY
83
+
84
+ logger = logging.getLogger(__name__)
85
+ logger.debug(DATASET_REGISTRY)
86
+ logger.debug(COLLATE_FN_REGISTER)
87
+ logger.debug(MODEL_REGISTRY)
88
+ logger.debug(OPTIMIZER_REGISTRY)
89
+ logger.debug(LOSS_REGISTRY)
90
+ logger.debug(METER_REGISTRY)
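
A minimal sketch of the config keys `setup_logging` reads; the values are illustrative and the node is built directly with fvcore's `CfgNode` rather than loaded from the project's YAML files:

```python
from fvcore.common.config import CfgNode

cfg = CfgNode()
cfg.LOG = CfgNode()
cfg.LOG.DIR = "outputs/log"              # where the *_debug / *_info log files are written
cfg.LOG.LOGGER_FILE = "train.log"
cfg.LOG.LOGGER_CONSOLE_LEVEL = "info"
cfg.LOG.LOGGER_CONSOLE_COLORFUL = True
setup_logging(cfg)
```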
CoCap/cocap/utils/profile.py ADDED
@@ -0,0 +1,102 @@
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 7/12/23
3
+ # @Author : Yaojie Shen
4
+ # @Project : CoCap
5
+ # @File : profile.py
6
+
7
+ import functools
8
+ import logging
9
+ import time
10
+ from collections import defaultdict, deque
11
+
12
+ import numpy as np
13
+ import torch
14
+ from tabulate import tabulate
15
+
16
+ logger = logging.getLogger(__name__)
17
+
18
+
19
+ def format_time(s: float) -> str:
20
+ """Return a nice string representation of `s` seconds."""
21
+ m = int(s / 60)
22
+ s -= m * 60
23
+ h = int(m / 60)
24
+ m -= h * 60
25
+ ms = int(s * 100000) / 100
26
+ s = int(s * 100) / 100.0
27
+ return ("" if h == 0 else str(h) + "h") + ("" if m == 0 else str(m) + "m") + ("" if s == 0 else str(s) + "s") + \
28
+ (str(ms) + "ms" if s == 0 else "")
29
+
30
+
31
+ class Timer(object):
32
+ def __init__(self, msg="", synchronize: bool = False, history_size: int = 1000, precision: int = 3):
33
+ """
34
+
35
+ :param msg:
36
+ :param synchronize: Call `torch.cuda.synchronize()` when getting time
37
+ :param history_size:
38
+ :param precision: round seconds to a given precision in decimal digits to avoid verbose
39
+ """
40
+ self.msg = msg
41
+ self.synchronize = synchronize
42
+ self.precision = precision
43
+
44
+ if self.msg:
45
+ logger.info("%s", msg)
46
+
47
+ self.start = self.get_time()
48
+ self.last_checkpoint = self.start
49
+
50
+ self.time_history = defaultdict(functools.partial(deque, maxlen=history_size))
51
+ self.history_size = history_size
52
+
53
+ def get_time(self):
54
+ if self.synchronize and torch.cuda.is_available():
55
+ torch.cuda.synchronize()
56
+ return round(time.time(), self.precision)
57
+
58
+ def reset(self):
59
+ self.last_checkpoint = self.get_time()
60
+
61
+ def __enter__(self):
62
+ self.start = self.get_time()
63
+ return self
64
+
65
+ def __exit__(self, typ, value, traceback):
66
+ self._duration = self.get_time() - self.start
67
+ if self.msg:
68
+ logger.info("%s [took %s]", self.msg, format_time(self._duration))
69
+
70
+ def __call__(self, stage_name: str):
71
+ current_time = self.get_time()
72
+ duration = (current_time - self.last_checkpoint)
73
+ self.last_checkpoint = current_time
74
+ self.time_history[stage_name].append(duration)
75
+ return duration
76
+
77
+ def get_info(self, averaged=True):
78
+ return {
79
+ k: round(float(np.mean(v)), self.precision) if averaged else round(v[-1], self.precision)
80
+ for k, v in self.time_history.items()
81
+ }
82
+
83
+ def __str__(self):
84
+ return str(self.get_info())
85
+
86
+ def print(self):
87
+ data = [[k, format_time(np.mean(v).item())] for k, v in self.time_history.items()]
88
+ return tabulate(data, headers=["Stage", "Time (ms)"], tablefmt="simple")
89
+
90
+
91
+ if __name__ == '__main__':
92
+ with Timer("Running...") as f:
93
+ time.sleep(1.12)
94
+
95
+ timer = Timer()
96
+ time.sleep(0.5)
97
+ timer("s1")
98
+ time.sleep(0.21)
99
+ timer("s2")
100
+ timer.print()
101
+ print(timer)
102
+ print(timer.get_info())
CoCap/cocap/utils/registry.py ADDED
@@ -0,0 +1,52 @@
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 2023/2/19 23:23
3
+ # @Author : Yaojie Shen
4
+ # @Project : CoCap
5
+ # @File : registry.py
6
+
7
+
8
+ from typing import Any
9
+
10
+ from fvcore.common.registry import Registry
11
+
12
+
13
+ class LooseRegistry(Registry):
14
+ def get(self, name: str) -> Any:
15
+ ret = None
16
+ for obj_name, obj in self._obj_map.items():
17
+ if name in obj_name:
18
+ ret = obj
19
+ break
20
+ if ret is None:
21
+ raise KeyError(
22
+ "No object contains '{}' found in '{}' registry!".format(name, self._name)
23
+ )
24
+ return ret
25
+
26
+
27
+ class PrefixRegistry(Registry):
28
+ def get(self, name: str) -> Any:
29
+ ret = None
30
+ for obj_name, obj in self._obj_map.items():
31
+ if obj_name.startswith(name):
32
+ ret = obj
33
+ break
34
+ if ret is None:
35
+ raise KeyError(
36
+ "No object starts with '{}' found in '{}' registry!".format(name, self._name)
37
+ )
38
+ return ret
39
+
40
+
41
+ class PostfixRegistry(Registry):
42
+ def get(self, name: str) -> Any:
43
+ ret = None
44
+ for obj_name, obj in self._obj_map.items():
45
+ if obj_name.endswith(name):
46
+ ret = obj
47
+ break
48
+ if ret is None:
49
+ raise KeyError(
50
+ "No object ends with '{}' found in '{}' registry!".format(name, self._name)
51
+ )
52
+ return ret
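
A minimal sketch of how one of these registries might be used; the registry name and the registered class are illustrative, not names defined in this repository:

```python
MODEL_REGISTRY = PrefixRegistry("MODEL")

@MODEL_REGISTRY.register()
class CaptionModelV1:
    pass

cls = MODEL_REGISTRY.get("CaptionModel")  # prefix match resolves to CaptionModelV1
assert cls is CaptionModelV1
```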
CoCap/cocap/utils/train_utils.py ADDED
@@ -0,0 +1,148 @@
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 2022/11/12 22:31
3
+ # @Author : Yaojie Shen
4
+ # @Project : CoCap
5
+ # @File : train_utils.py
6
+
7
+ import datetime
8
+ import hashlib
9
+ import itertools
10
+ import logging
11
+ import os
12
+ import pickle
13
+ import random
14
+ import time
15
+ import typing
16
+ from typing import *
17
+
18
+ import numpy as np
19
+ import torch
20
+ import torch.distributed as dist
21
+ from fvcore.common.config import CfgNode
22
+
23
+ logger = logging.getLogger(__name__)
24
+
25
+
26
+ class CudaPreFetcher:
27
+ def __init__(self, data_loader):
28
+ self.dl = data_loader
29
+ self.loader = iter(data_loader)
30
+ self.stream = torch.cuda.Stream()
31
+ self.batch = None
32
+
33
+ def preload(self):
34
+ try:
35
+ self.batch = next(self.loader)
36
+ except StopIteration:
37
+ self.batch = None
38
+ return
39
+ with torch.cuda.stream(self.stream):
40
+ self.batch = self.cuda(self.batch)
41
+
42
+ @staticmethod
43
+ def cuda(x: typing.Any):
44
+ if isinstance(x, list) or isinstance(x, tuple):
45
+ return [CudaPreFetcher.cuda(i) for i in x]
46
+ elif isinstance(x, dict):
47
+ return {k: CudaPreFetcher.cuda(v) for k, v in x.items()}
48
+ elif isinstance(x, torch.Tensor):
49
+ return x.cuda(non_blocking=True)
50
+ else:
51
+ return x
52
+
53
+ def __next__(self):
54
+ torch.cuda.current_stream().wait_stream(self.stream)
55
+ batch = self.batch
56
+ if batch is None:
57
+ raise StopIteration
58
+ self.preload()
59
+ return batch
60
+
61
+ def __iter__(self):
62
+ self.preload()
63
+ return self
64
+
65
+ def __len__(self):
66
+ return len(self.dl)
67
+
68
+
69
+ def manual_seed(cfg: CfgNode):
70
+ if cfg.SYS.DETERMINISTIC:
71
+ torch.manual_seed(cfg.SYS.SEED)
72
+ random.seed(cfg.SYS.SEED)
73
+ np.random.seed(cfg.SYS.SEED)
74
+ torch.cuda.manual_seed(cfg.SYS.SEED)
75
+ torch.backends.cudnn.deterministic = True
76
+ torch.backends.cudnn.benchmark = False  # benchmark selection would reintroduce nondeterminism
77
+ logger.debug("Manual seed is set")
78
+ else:
79
+ logger.warning("Manual seed is not used")
80
+
81
+
82
+ def init_distributed(proc: int, cfg: CfgNode):
83
+ if cfg.SYS.MULTIPROCESS: # initialize multiprocess
84
+ world_size = cfg.SYS.NUM_GPU * cfg.SYS.NUM_SHARDS
85
+ rank = cfg.SYS.NUM_GPU * cfg.SYS.SHARD_ID + proc
86
+ dist.init_process_group(backend="nccl", init_method=cfg.SYS.INIT_METHOD, world_size=world_size, rank=rank)
87
+ torch.cuda.set_device(cfg.SYS.GPU_DEVICES[proc])
88
+
89
+
90
+ def save_config(cfg: CfgNode):
91
+ if not dist.is_initialized() or dist.get_rank() == 0:
92
+ config_file = os.path.join(cfg.LOG.DIR, f"config_{get_timestamp()}.yaml")
93
+ with open(config_file, "w") as f:
94
+ f.write(cfg.dump())
95
+ logger.debug("config is saved to %s", config_file)
96
+
97
+
98
+ def get_timestamp():
99
+ return datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
100
+
101
+
102
+ def gather_object_multiple_gpu(list_object: List[Any], backend: AnyStr = "nccl", shared_folder=None,
103
+ retry=600, sleep=0.1):
104
+ """
105
+ Gather a list of objects from every rank and concatenate them in rank order.
106
+ """
107
+ assert isinstance(list_object, list), "`list_object` only accepts a list."
108
+ assert backend in ["nccl", "filesystem"]
109
+ if backend == "nccl":
110
+ gathered_objects = [None for _ in range(dist.get_world_size())]
111
+ dist.all_gather_object(gathered_objects, list_object)
112
+ return list(itertools.chain(*gathered_objects))
113
+ else:
114
+ assert shared_folder is not None, "`shared_folder` must be set when backend is `filesystem`"
115
+ os.makedirs(shared_folder, exist_ok=True)
116
+ uuid = torch.randint(99999999, 99999999999, size=(1,), dtype=torch.long).cuda()
117
+ dist.all_reduce(uuid)
118
+ uuid = hex(uuid.cpu().item())[-8:]
119
+ with open(os.path.join(shared_folder, f"{uuid}_rank_{dist.get_rank():04d}.pkl"), "wb") as f:
120
+ data = pickle.dumps(list_object)
121
+ f.write(data)
122
+ with open(os.path.join(shared_folder, f"{uuid}_rank_{dist.get_rank():04d}.md5"), "wb") as f:
123
+ checksum = hashlib.md5(data).hexdigest()
124
+ pickle.dump(checksum, f)
125
+ gathered_list = []
126
+ dist.barrier()
127
+ for rank in range(dist.get_world_size()):
128
+ data_filename = os.path.join(shared_folder, f"{uuid}_rank_{rank:04d}.pkl")
129
+ checksum_filename = os.path.join(shared_folder, f"{uuid}_rank_{rank:04d}.md5")
130
+ data = None
131
+ for _ in range(retry):
132
+ time.sleep(sleep)
133
+ try:
134
+ if not os.path.exists(data_filename):
135
+ continue
136
+ if not os.path.exists(checksum_filename):
137
+ continue
138
+ raw_data = open(data_filename, "rb").read()
139
+ checksum = pickle.load(open(checksum_filename, "rb"))
140
+ assert checksum == hashlib.md5(raw_data).hexdigest()
141
+ data = pickle.loads(raw_data)
142
+ break
143
+ except Exception:
144
+ pass
145
+ assert data is not None, f"Gather from filesystem failed after retry for {retry} times."
146
+ gathered_list.extend(data)
147
+ dist.barrier()
148
+ return gathered_list
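
A minimal sketch of wrapping a DataLoader with `CudaPreFetcher`; a CUDA device is assumed and the dataset is synthetic:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

loader = DataLoader(TensorDataset(torch.randn(128, 16)), batch_size=32)
prefetcher = CudaPreFetcher(loader) if torch.cuda.is_available() else loader
for (batch,) in prefetcher:
    pass  # with the prefetcher, `batch` is already on the GPU when it is yielded
```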
CoCap/cocap/utils/video.py ADDED
@@ -0,0 +1,98 @@
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 8/7/23
3
+ # @Author : Yaojie Shen
4
+ # @Project : CoCap
5
+ # @File : video.py
6
+
7
+
8
+ import os
9
+ import subprocess
10
+ from typing import *
11
+
12
+ import cv2
13
+ from joblib import Parallel, delayed
14
+
15
+ __all__ = ["get_duration_info", "convert_video"]
16
+
17
+
18
+ def _get_single_video_duration_info(video_path) -> Tuple[float, float, int]:
19
+ """
20
+ return video duration in seconds
21
+ :param video_path: video path
22
+ :return: video duration, fps, frame count
23
+ """
24
+ video = cv2.VideoCapture(video_path)
25
+ fps = video.get(cv2.CAP_PROP_FPS)
26
+ frame_count = video.get(cv2.CAP_PROP_FRAME_COUNT)
27
+ return frame_count / fps, fps, int(frame_count)
28
+
29
+
30
+ def get_duration_info(video_paths: Union[str, Iterable]) -> Union[Tuple[float, float, int], List[Tuple[float, float, int]]]:
31
+ """
32
+
33
+ :param video_paths: video path or a list of video path
34
+ :return: video duration, fps, frame count
35
+ """
36
+ if isinstance(video_paths, str):
37
+ return _get_single_video_duration_info(video_paths)
38
+ else:
39
+ return Parallel(n_jobs=os.cpu_count())(
40
+ delayed(_get_single_video_duration_info)(path) for path in video_paths
41
+ )
42
+
43
+
44
+ def convert_video(input_file: AnyStr, output_file: AnyStr,
45
+ ffmpeg_exec: AnyStr = "/usr/bin/ffmpeg",
46
+ codec="libx264",
47
+ keyint: int = None,
48
+ overwrite: bool = False,
49
+ verbose: bool = False,
50
+ resize: tuple = None) -> None:
51
+ """
52
+ :param input_file:
53
+ :param output_file:
54
+ :param ffmpeg_exec:
55
+ :param codec: supported video codec, e.g., libx264 and libx265
56
+ :param keyint:
57
+ :param overwrite:
58
+ :param verbose:
59
+ :param resize:
60
+ """
61
+ assert codec is None or codec in ["libx264", "libx265"], "Video codec {} is not supported.".format(codec)
62
+ assert keyint is None or codec is not None, "Codec must be specified if keyint is not None."
63
+
64
+ os.makedirs(os.path.dirname(output_file), exist_ok=True)
65
+
66
+ command = [ffmpeg_exec, "-i", f"{input_file}", "-max_muxing_queue_size", "9999"]
67
+ if codec is not None:
68
+ # use specified codec
69
+ command += ["-c:v", codec]
70
+ if codec == "libx265":
71
+ command += ["-vtag", "hvc1"]
72
+ if keyint is not None and codec == "libx264":
73
+ command += ["-x264-params", f"keyint={keyint}"]
74
+ elif keyint is not None and codec == "libx265":
75
+ command += ["-x265-params", f"keyint={keyint}"]
76
+ if resize is not None:
77
+ if isinstance(resize, int): # resize height
78
+ assert resize % 2 == 0, "size is not divisible by 2"
79
+ command += [
80
+ "-vf",
81
+ f"scale='if(gt(ih,iw),{resize},trunc(oh*a/2)*2)':'if(gt(ih, iw),trunc(ow/a/2)*2,{resize})'"
82
+ ]
83
+ elif isinstance(resize, (tuple, list)) and len(resize) == 2:
84
+ assert isinstance(resize[0], int) and isinstance(resize[1], int), "size should be int"
85
+ assert resize[0] % 2 == 0 and resize[1] % 2 == 0, "size is not divisible by 2"
86
+ command += ["-vf", f"scale={resize[0]}:{resize[1]}"]
87
+ else:
88
+ raise ValueError("size is not supported: {}".format(resize))
89
+ command += ["-c:a", "copy", "-movflags", "faststart", f"{output_file}"]
90
+
91
+ if overwrite:
92
+ command += ["-y"]
93
+ else:
94
+ command += ["-n"]
95
+ subprocess.run(command,
96
+ stderr=subprocess.DEVNULL if not verbose else None,
97
+ stdout=subprocess.DEVNULL if not verbose else None)
98
+ # TODO: return
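
A usage sketch of `convert_video`; the paths are placeholders and `ffmpeg` is assumed to be available at the default location:

```python
convert_video(
    "raw/video.mp4", "processed/video.mp4",
    codec="libx264", keyint=60,   # fixed GOP size, convenient for compressed-domain reading
    resize=224, overwrite=True,
)
```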
CoCap/cocap/utils/visualize.py ADDED
@@ -0,0 +1,41 @@
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 9/5/23
3
+ # @Author : Yaojie Shen
4
+ # @Project : CoCap
5
+ # @File : visualize.py
6
+
7
+ import numpy as np
8
+ import torch
9
+ from PIL import Image
10
+ from torchvision.transforms.functional import normalize
11
+
12
+
13
+ def _convert_to_numpy_array(data):
14
+ # convert any type to numpy array
15
+ if isinstance(data, np.ndarray):
16
+ pass
17
+ elif isinstance(data, torch.Tensor):
18
+ data = data.detach().cpu().numpy()
19
+ elif isinstance(data, Image.Image):
20
+ data = np.array(data)
21
+ else:
22
+ raise ValueError("Data type is not supported to convert to numpy array: {}".format(type(data)))
23
+ return data
24
+
25
+
26
+ def _convert_to_torch_tensor(data):
27
+ # convert any type to a torch tensor
28
+ if isinstance(data, torch.Tensor):
29
+ pass
30
+ elif isinstance(data, np.ndarray):
31
+ data = torch.from_numpy(data)
32
+ else:
33
+ raise ValueError("Data type is not supported to convert to torch.Tensor: {}".format(type(data)))
34
+ return data.detach().cpu()
35
+
36
+
37
+ def inv_normalize(image_or_video, mean, std):
38
+ # convert to torch.Tensor
39
+ data = _convert_to_torch_tensor(image_or_video)
40
+ data = normalize(data, mean=[-m / s for m, s in zip(mean, std)], std=[1 / s for s in std])
41
+ return data.numpy()
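
A short sketch of undoing channel-wise normalization with `inv_normalize`; the mean/std values are illustrative (ImageNet statistics), not necessarily the ones used by this project:

```python
import torch

mean, std = (0.485, 0.456, 0.406), (0.229, 0.224, 0.225)
normalized = torch.randn(3, 224, 224)            # a normalized CHW image
restored = inv_normalize(normalized, mean, std)  # numpy array in the original value range
```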
CoCap/cocap/utils/writer.py ADDED
@@ -0,0 +1,30 @@
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 2022/11/13 00:41
3
+ # @Author : Yaojie Shen
4
+ # @Project : CoCap
5
+ # @File : writer.py
6
+
7
+ import torch.distributed as dist
8
+ from torch.utils.tensorboard import SummaryWriter
9
+
10
+
11
+ class DummySummaryWriter:
12
+ """
13
+ Issue: https://github.com/pytorch/pytorch/issues/24236
14
+ """
15
+
16
+ def __init__(self, *args, **kwargs):
17
+ pass
18
+
19
+ def __call__(self, *args, **kwargs):
20
+ return self
21
+
22
+ def __getattr__(self, *args, **kwargs):
23
+ return self
24
+
25
+
26
+ def get_writer(*args, **kwargs):
27
+ if not dist.is_initialized() or dist.get_rank() == 0:
28
+ return SummaryWriter(*args, **kwargs)
29
+ else:
30
+ return DummySummaryWriter()
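
A small usage sketch: only rank 0 (or a non-distributed run) gets a real `SummaryWriter`, while every other rank receives the no-op writer, so logging code needs no explicit rank checks; the log directory is a placeholder:

```python
writer = get_writer(log_dir="outputs/tb")
writer.add_scalar("train/loss", 0.5, global_step=1)
writer.close()
```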