'''
 * The Recognize Anything Model (RAM)
 * Written by Xinyu Huang
'''
import json
import warnings

import numpy as np
import torch
from torch import nn

from .bert import BertConfig, BertLMHeadModel, BertModel
from .swin_transformer import SwinTransformer
from .utils import *

warnings.filterwarnings("ignore")


class RAM(nn.Module):
    def __init__(self,
                 med_config=f'{CONFIG_PATH}/configs/med_config.json',
                 image_size=384,
                 vit='base',
                 vit_grad_ckpt=False,
                 vit_ckpt_layer=0,
                 prompt='a picture of ',
                 threshold=0.68,
                 delete_tag_index=[],
                 tag_list=f'{CONFIG_PATH}/data/ram_tag_list.txt',
                 tag_list_chinese=f'{CONFIG_PATH}/data/ram_tag_list_chinese.txt'):
        r""" The Recognize Anything Model (RAM) inference module.
        RAM is a strong image tagging model, which can recognize any common category with high accuracy.
        Described in the paper "Recognize Anything: A Strong Image Tagging Model" https://recognize-anything.github.io/

        Args:
            med_config (str): path to the mixture of encoder-decoder model's configuration file
            image_size (int): input image size
            vit (str): model size of the vision transformer
            threshold (float): default tagging threshold
            delete_tag_index (list): indices of tags to suppress because they may disturb captioning
        """
        super().__init__()

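        # create image encoder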
        if vit == 'swin_b':
            if image_size == 224:
                vision_config_path = f'{CONFIG_PATH}/configs/swin/config_swinB_224.json'
            elif image_size == 384:
                vision_config_path = f'{CONFIG_PATH}/configs/swin/config_swinB_384.json'
            else:
                raise ValueError(
                    f"vit='swin_b' supports image_size 224 or 384, got {image_size}")
            vision_config = read_json(vision_config_path)
            assert image_size == vision_config['image_res']

            vision_width = vision_config['vision_width']

            self.visual_encoder = SwinTransformer(
                img_size=vision_config['image_res'],
                patch_size=4,
                in_chans=3,
                embed_dim=vision_config['embed_dim'],
                depths=vision_config['depths'],
                num_heads=vision_config['num_heads'],
                window_size=vision_config['window_size'],
                mlp_ratio=4.,
                qkv_bias=True,
                drop_rate=0.0,
                drop_path_rate=0.1,
                ape=False,
                patch_norm=True,
                use_checkpoint=False)

        elif vit == 'swin_l':
            if image_size == 224:
                vision_config_path = f'{CONFIG_PATH}/configs/swin/config_swinL_224.json'
            elif image_size == 384:
                vision_config_path = f'{CONFIG_PATH}/configs/swin/config_swinL_384.json'
            else:
                raise ValueError(
                    f"vit='swin_l' supports image_size 224 or 384, got {image_size}")
            vision_config = read_json(vision_config_path)
            assert image_size == vision_config['image_res']

            vision_width = vision_config['vision_width']

            self.visual_encoder = SwinTransformer(
                img_size=vision_config['image_res'],
                patch_size=4,
                in_chans=3,
                embed_dim=vision_config['embed_dim'],
                depths=vision_config['depths'],
                num_heads=vision_config['num_heads'],
                window_size=vision_config['window_size'],
                mlp_ratio=4.,
                qkv_bias=True,
                drop_rate=0.0,
                drop_path_rate=0.1,
                ape=False,
                patch_norm=True,
                use_checkpoint=False)

        else:
            self.visual_encoder, vision_width = create_vit(
                vit, image_size, vit_grad_ckpt, vit_ckpt_layer)

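        # create tokenizer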
        self.tokenizer = init_tokenizer()

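        # create image-tag interaction encoder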
        encoder_config = BertConfig.from_json_file(med_config)
        encoder_config.encoder_width = 512
        self.tag_encoder = BertModel(config=encoder_config,
                                     add_pooling_layer=False)

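        # create image-tag-text decoder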
        decoder_config = BertConfig.from_json_file(med_config)
        self.text_decoder = BertLMHeadModel(config=decoder_config)

        self.delete_tag_index = delete_tag_index
        self.prompt = prompt
        self.prompt_length = len(self.tokenizer(self.prompt).input_ids) - 1

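        # load the English and Chinese tag lists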
        self.tag_list = self.load_tag_list(tag_list)
        self.tag_list_chinese = self.load_tag_list(tag_list_chinese)

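        # create image-tag recognition decoder (Query2Label-style tagging head,
        # cf. q2l_config.json)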
        self.threshold = threshold
        self.num_class = len(self.tag_list)
        q2l_config = BertConfig.from_json_file(f'{CONFIG_PATH}/configs/q2l_config.json')
        q2l_config.encoder_width = 512
        self.tagging_head = BertModel(config=q2l_config,
                                      add_pooling_layer=False)
        self.tagging_head.resize_token_embeddings(len(self.tokenizer))
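        # learnable label embeddings, one query vector per tag class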
        self.label_embed = nn.Parameter(torch.zeros(self.num_class, q2l_config.encoder_width))

        if q2l_config.hidden_size != 512:
            self.wordvec_proj = nn.Linear(512, q2l_config.hidden_size)
        else:
            self.wordvec_proj = nn.Identity()

        self.fc = nn.Linear(q2l_config.hidden_size, 1)

        self.del_selfattention()

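        # tie the weights of the image-tag interaction encoder and the tagging head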
        tie_encoder_decoder_weights(self.tag_encoder, self.tagging_head, '',
                                    ' ')
        self.image_proj = nn.Linear(vision_width, 512)

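        # per-class thresholds: initialize every class with the global
        # threshold, then override with the tuned values shipped in
        # ram_tag_list_threshold.txt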
        self.class_threshold = torch.ones(self.num_class) * self.threshold
        ram_class_threshold_path = f'{CONFIG_PATH}/data/ram_tag_list_threshold.txt'
        with open(ram_class_threshold_path, 'r', encoding='utf-8') as f:
            ram_class_threshold = [float(s.strip()) for s in f]
        for key, value in enumerate(ram_class_threshold):
            self.class_threshold[key] = value

    def load_tag_list(self, tag_list_file):
        with open(tag_list_file, 'r', encoding="utf-8") as f:
            tag_list = f.read().splitlines()
        tag_list = np.array(tag_list)
        return tag_list

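    # delete the self-attention layers of the tagging head to reduce
    # computation: in 'tagging' mode the label queries only cross-attend
    # to the image embeddings, so self-attention is never used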
    def del_selfattention(self):
        del self.tagging_head.embeddings
        for layer in self.tagging_head.encoder.layer:
            del layer.attention

    def condition_forward(self,
                          image,
                          threshold=0.68,
                          condition_flag=None,
                          tag_input=None,
                          only_feature=True,
                          ):
        r"""Encode an image and, optionally, run the tagging head.

        Returns the projected image embeddings when ``only_feature`` is True,
        otherwise ``(image_embeds, logits, targets)``. The ``threshold``,
        ``condition_flag`` and ``tag_input`` arguments are currently unused.
        """
        image_embeds = self.image_proj(self.visual_encoder(image))
        if only_feature:
            return image_embeds

        label_embed = torch.nn.functional.relu(self.wordvec_proj(self.label_embed))
        image_atts = torch.ones(image_embeds.size()[:-1],
                                dtype=torch.long).to(image.device)

        image_cls_embeds = image_embeds[:, 0, :]
        image_spatial_embeds = image_embeds[:, 1:, :]

        bs = image_spatial_embeds.shape[0]
        label_embed = label_embed.unsqueeze(0).repeat(bs, 1, 1)
        tagging_embed = self.tagging_head(
            encoder_embeds=label_embed,
            encoder_hidden_states=image_embeds,
            encoder_attention_mask=image_atts,
            return_dict=False,
            mode='tagging',
        )

        logits = self.fc(tagging_embed[0]).squeeze(-1)

        targets = torch.where(
            torch.sigmoid(logits) > self.class_threshold.to(image.device),
            torch.tensor(1.0).to(image.device),
            torch.zeros(self.num_class).to(image.device))

        return image_embeds, logits, targets

    def generate_tag(self,
                     image,
                     threshold=0.68,
                     tag_input=None,
                     ):
        r"""Recognize tags for a batch of images.

        Returns two lists of comma-separated tag strings, one English and
        one Chinese, each with one entry per image.
        """
        label_embed = torch.nn.functional.relu(self.wordvec_proj(self.label_embed))

        image_embeds = self.image_proj(self.visual_encoder(image))
        image_atts = torch.ones(image_embeds.size()[:-1],
                                dtype=torch.long).to(image.device)

        image_cls_embeds = image_embeds[:, 0, :]
        image_spatial_embeds = image_embeds[:, 1:, :]

        bs = image_spatial_embeds.shape[0]
        label_embed = label_embed.unsqueeze(0).repeat(bs, 1, 1)
        tagging_embed = self.tagging_head(
            encoder_embeds=label_embed,
            encoder_hidden_states=image_embeds,
            encoder_attention_mask=image_atts,
            return_dict=False,
            mode='tagging',
        )

        logits = self.fc(tagging_embed[0]).squeeze(-1)

        targets = torch.where(
            torch.sigmoid(logits) > self.class_threshold.to(image.device),
            torch.tensor(1.0).to(image.device),
            torch.zeros(self.num_class).to(image.device))

        tag = targets.cpu().numpy()
        tag[:, self.delete_tag_index] = 0
        tag_output = []
        tag_output_chinese = []
        for b in range(bs):
            index = np.argwhere(tag[b] == 1)
            token = self.tag_list[index].squeeze(axis=1)
            tag_output.append(', '.join(token))
            token_chinese = self.tag_list_chinese[index].squeeze(axis=1)
            tag_output_chinese.append(', '.join(token_chinese))

        return tag_output, tag_output_chinese

    def generate_tag_openset(self,
                             image,
                             threshold=0.68,
                             tag_input=None,
                             ):
        r"""Recognize tags for a batch of images and return them as
        ' | '-separated strings, one entry per image.
        """
        label_embed = torch.nn.functional.relu(self.wordvec_proj(self.label_embed))

        image_embeds = self.image_proj(self.visual_encoder(image))
        image_atts = torch.ones(image_embeds.size()[:-1],
                                dtype=torch.long).to(image.device)

        image_cls_embeds = image_embeds[:, 0, :]
        image_spatial_embeds = image_embeds[:, 1:, :]

        bs = image_spatial_embeds.shape[0]
        label_embed = label_embed.unsqueeze(0).repeat(bs, 1, 1)
        tagging_embed = self.tagging_head(
            encoder_embeds=label_embed,
            encoder_hidden_states=image_embeds,
            encoder_attention_mask=image_atts,
            return_dict=False,
            mode='tagging',
        )

        logits = self.fc(tagging_embed[0]).squeeze(-1)

        targets = torch.where(
            torch.sigmoid(logits) > self.class_threshold.to(image.device),
            torch.tensor(1.0).to(image.device),
            torch.zeros(self.num_class).to(image.device))

        tag = targets.cpu().numpy()
        tag[:, self.delete_tag_index] = 0
        tag_output = []
        for b in range(bs):
            index = np.argwhere(tag[b] == 1)
            token = self.tag_list[index].squeeze(axis=1)
            tag_output.append(' | '.join(token))

        return tag_output


def ram(pretrained='', **kwargs):
    model = RAM(**kwargs)
    if pretrained:
        if kwargs['vit'] == 'swin_b':
            model, msg = load_checkpoint_swinbase(model, pretrained, kwargs)
        elif kwargs['vit'] == 'swin_l':
            model, msg = load_checkpoint_swinlarge(model, pretrained, kwargs)
        else:
            model, msg = load_checkpoint(model, pretrained)
        print('vit:', kwargs['vit'])

    return model
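

# A minimal usage sketch (illustrative only: the checkpoint filename, image
# path, and preprocessing below are assumptions, not part of this module):
#
#     from PIL import Image
#     from torchvision import transforms
#
#     transform = transforms.Compose([
#         transforms.Resize((384, 384)),
#         transforms.ToTensor(),
#         transforms.Normalize(mean=[0.485, 0.456, 0.406],
#                              std=[0.229, 0.224, 0.225]),
#     ])
#     model = ram(pretrained='ram_swin_large_14m.pth',
#                 image_size=384, vit='swin_l').eval()
#     image = transform(Image.open('demo.jpg').convert('RGB')).unsqueeze(0)
#     with torch.no_grad():
#         tags_en, tags_zh = model.generate_tag(image)
#     print(tags_en[0])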