id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
15,751 | import numpy as np
from scipy import interpolate
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
The provided code snippet includes necessary dependencies for implementing the `window_reverse` function. Write a Python function `def window_reverse(windows, window_size, H, W)` to solve the following problem:
Args: windows: (num_windows*B, window_size, window_size, C) window_size (int): Window size H (int): Height of image W (int): Width of image Returns: x: (B, H, W, C)
Here is the function:
def window_reverse(windows, window_size, H, W):
    """
    Reassemble per-window tokens (Swin-style) back into a full feature map.

    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image
    Returns:
        x: (B, H, W, C)
    """
    # Recover the batch size: windows.shape[0] == B * (H/ws) * (W/ws).
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    # (B, H//ws, W//ws, ws, ws, C): split the leading dim into the window grid.
    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
    # Interleave grid and intra-window axes back into (B, H, W, C).
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x | Args: windows: (num_windows*B, window_size, window_size, C) window_size (int): Window size H (int): Height of image W (int): Width of image Returns: x: (B, H, W, C) |
15,752 | import os
from collections import OrderedDict
from timm.models.layers import DropPath
import torch
from torch import nn
from torch.nn import MultiheadAttention
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
_MODELS = {
"ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"),
"ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"),
"ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"),
}
class VisionTransformer(nn.Module):
    """Video vision-transformer backbone.

    A 3D convolution patch-embeds the clip (optionally halving the temporal
    dimension), a class token and positional embeddings are added, and the
    tokens are processed by a `Transformer` stack.

    NOTE(review): relies on `LayerNorm` and `Transformer` defined elsewhere
    in this module.
    """

    def __init__(
        self,
        # backbone
        input_resolution, patch_size, width, layers, heads, output_dim, backbone_drop_path_rate=0.,
        use_checkpoint=False, checkpoint_num=[0], t_size=8, kernel_size=3, dw_reduction=1.5,
        temporal_downsample=True,
        # BUG FIX: default was `-False` (unary minus on a bool, i.e. the int 0);
        # a plain `False` is what was intended and is truth-equivalent.
        no_lmhra=False, double_lmhra=True,
        # global block
        return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
        n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
        mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
        cls_dropout=0.5, num_classes=400,
    ):
        super().__init__()
        self.input_resolution = input_resolution
        self.output_dim = output_dim
        padding = (kernel_size - 1) // 2
        if temporal_downsample:
            # Temporal stride 2 halves T, so the transformer sees t_size // 2 frames.
            self.conv1 = nn.Conv3d(3, width, (kernel_size, patch_size, patch_size), (2, patch_size, patch_size), (padding, 0, 0), bias=False)
            t_size = t_size // 2
        else:
            self.conv1 = nn.Conv3d(3, width, (1, patch_size, patch_size), (1, patch_size, patch_size), (0, 0, 0), bias=False)
        scale = width ** -0.5  # ViT-style init scale for the embeddings
        self.class_embedding = nn.Parameter(scale * torch.randn(width))
        self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
        self.ln_pre = LayerNorm(width)
        self.transformer = Transformer(
            width, layers, heads, dw_reduction=dw_reduction,
            backbone_drop_path_rate=backbone_drop_path_rate,
            use_checkpoint=use_checkpoint, checkpoint_num=checkpoint_num, t_size=t_size,
            no_lmhra=no_lmhra, double_lmhra=double_lmhra,
            return_list=return_list, n_layers=n_layers, n_dim=n_dim, n_head=n_head,
            mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, mlp_dropout=mlp_dropout,
            cls_dropout=cls_dropout, num_classes=num_classes,
        )

    def forward(self, x):
        """x: video tensor [N, 3, T, H, W] -> output of the transformer stack."""
        x = self.conv1(x)  # shape = [N, width, T', grid, grid] (comment fixed: conv is 3D)
        N, C, T, H, W = x.shape
        # Fold time into the batch: one token sequence per frame.
        x = x.permute(0, 2, 3, 4, 1).reshape(N * T, H * W, C)
        x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1)  # shape = [*, grid ** 2 + 1, width]
        x = x + self.positional_embedding.to(x.dtype)
        x = self.ln_pre(x)
        x = x.permute(1, 0, 2)  # NLD -> LND
        out = self.transformer(x)
        return out
def load_state_dict(model, state_dict):
    """Load a checkpoint into `model`, inflating shape-mismatched conv
    weights along the temporal axis via `inflate_weight`.

    Mutates `state_dict` in place, then loads it with strict=False.
    """
    target = model.state_dict()
    for k in state_dict.keys():
        loaded_param = state_dict[k]
        expected = target[k]
        if loaded_param.shape == expected.shape:
            continue
        if expected.dim() <= 2:
            # Mismatch on a non-spatial weight: keep the model's own init.
            print(f'Ignore: {k}')
            continue
        print(f'Inflate: {k}, {loaded_param.shape} => {expected.shape}')
        # dim 2 of a Conv3d weight is the temporal kernel size.
        state_dict[k] = inflate_weight(loaded_param, expected.shape[2])
    model.load_state_dict(state_dict, strict=False)
def intern_action_l14(
    pretrained=True, use_checkpoint=False, checkpoint_num=[0],
    t_size=16, dw_reduction=1.5, backbone_drop_path_rate=0.,
    temporal_downsample=True,
    no_lmhra=False, double_lmhra=True,
    return_list=[20, 21, 22, 23],
    n_layers=4, n_dim=1024, n_head=16, mlp_factor=4.0, drop_path_rate=0.,
    mlp_dropout=[0.5, 0.5, 0.5, 0.5],
    cls_dropout=0.5, num_classes=400,
):
    """Build the ViT-L/14 (224 px input) video action-recognition model.

    When `pretrained` is True, loads the checkpoint registered under
    _MODELS["ViT-L/14"] via the module-level `load_state_dict` helper
    (which inflates 2D weights to 3D as needed).
    """
    # Fixed ViT-L/14 backbone hyper-parameters; the keyword arguments only
    # tune the temporal/global-block behaviour.
    model = VisionTransformer(
        input_resolution=224,
        patch_size=14,
        width=1024,
        layers=24,
        heads=16,
        output_dim=768,
        use_checkpoint=use_checkpoint,
        checkpoint_num=checkpoint_num,
        t_size=t_size,
        dw_reduction=dw_reduction,
        backbone_drop_path_rate=backbone_drop_path_rate,
        temporal_downsample=temporal_downsample,
        no_lmhra=no_lmhra,
        double_lmhra=double_lmhra,
        return_list=return_list,
        n_layers=n_layers,
        n_dim=n_dim,
        n_head=n_head,
        mlp_factor=mlp_factor,
        drop_path_rate=drop_path_rate,
        mlp_dropout=mlp_dropout,
        cls_dropout=cls_dropout,
        num_classes=num_classes,
    )
    if pretrained:
        print('load pretrained weights')
        state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu')
        load_state_dict(model, state_dict)
return model.eval() | null |
15,753 | import os
from collections import OrderedDict
from timm.models.layers import DropPath
import torch
from torch import nn
from torch.nn import MultiheadAttention
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
_MODELS = {
"ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"),
"ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"),
"ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"),
}
class VisionTransformer(nn.Module):
    """Video vision-transformer backbone.

    A 3D convolution patch-embeds the clip (optionally halving the temporal
    dimension), a class token and positional embeddings are added, and the
    tokens are processed by a `Transformer` stack.

    NOTE(review): relies on `LayerNorm` and `Transformer` defined elsewhere
    in this module.
    """

    def __init__(
        self,
        # backbone
        input_resolution, patch_size, width, layers, heads, output_dim, backbone_drop_path_rate=0.,
        use_checkpoint=False, checkpoint_num=[0], t_size=8, kernel_size=3, dw_reduction=1.5,
        temporal_downsample=True,
        # BUG FIX: default was `-False` (unary minus on a bool, i.e. the int 0);
        # a plain `False` is what was intended and is truth-equivalent.
        no_lmhra=False, double_lmhra=True,
        # global block
        return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
        n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
        mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
        cls_dropout=0.5, num_classes=400,
    ):
        super().__init__()
        self.input_resolution = input_resolution
        self.output_dim = output_dim
        padding = (kernel_size - 1) // 2
        if temporal_downsample:
            # Temporal stride 2 halves T, so the transformer sees t_size // 2 frames.
            self.conv1 = nn.Conv3d(3, width, (kernel_size, patch_size, patch_size), (2, patch_size, patch_size), (padding, 0, 0), bias=False)
            t_size = t_size // 2
        else:
            self.conv1 = nn.Conv3d(3, width, (1, patch_size, patch_size), (1, patch_size, patch_size), (0, 0, 0), bias=False)
        scale = width ** -0.5  # ViT-style init scale for the embeddings
        self.class_embedding = nn.Parameter(scale * torch.randn(width))
        self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
        self.ln_pre = LayerNorm(width)
        self.transformer = Transformer(
            width, layers, heads, dw_reduction=dw_reduction,
            backbone_drop_path_rate=backbone_drop_path_rate,
            use_checkpoint=use_checkpoint, checkpoint_num=checkpoint_num, t_size=t_size,
            no_lmhra=no_lmhra, double_lmhra=double_lmhra,
            return_list=return_list, n_layers=n_layers, n_dim=n_dim, n_head=n_head,
            mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, mlp_dropout=mlp_dropout,
            cls_dropout=cls_dropout, num_classes=num_classes,
        )

    def forward(self, x):
        """x: video tensor [N, 3, T, H, W] -> output of the transformer stack."""
        x = self.conv1(x)  # shape = [N, width, T', grid, grid] (comment fixed: conv is 3D)
        N, C, T, H, W = x.shape
        # Fold time into the batch: one token sequence per frame.
        x = x.permute(0, 2, 3, 4, 1).reshape(N * T, H * W, C)
        x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1)  # shape = [*, grid ** 2 + 1, width]
        x = x + self.positional_embedding.to(x.dtype)
        x = self.ln_pre(x)
        x = x.permute(1, 0, 2)  # NLD -> LND
        out = self.transformer(x)
        return out
def load_state_dict(model, state_dict):
    """Load a checkpoint into `model`, inflating shape-mismatched conv
    weights along the temporal axis via `inflate_weight`.

    Mutates `state_dict` in place, then loads it with strict=False.
    """
    target = model.state_dict()
    for k in state_dict.keys():
        loaded_param = state_dict[k]
        expected = target[k]
        if loaded_param.shape == expected.shape:
            continue
        if expected.dim() <= 2:
            # Mismatch on a non-spatial weight: keep the model's own init.
            print(f'Ignore: {k}')
            continue
        print(f'Inflate: {k}, {loaded_param.shape} => {expected.shape}')
        # dim 2 of a Conv3d weight is the temporal kernel size.
        state_dict[k] = inflate_weight(loaded_param, expected.shape[2])
    model.load_state_dict(state_dict, strict=False)
def intern_action_l14_336(
    pretrained=True, use_checkpoint=False, checkpoint_num=[0],
    t_size=16, dw_reduction=1.5, backbone_drop_path_rate=0.,
    no_temporal_downsample=True,
    no_lmhra=False, double_lmhra=True,
    return_list=[20, 21, 22, 23],
    n_layers=4, n_dim=1024, n_head=16, mlp_factor=4.0, drop_path_rate=0.,
    mlp_dropout=[0.5, 0.5, 0.5, 0.5],
    cls_dropout=0.5, num_classes=400,
):
    """Build the ViT-L/14 (336 px input) video action-recognition model.

    `no_temporal_downsample=True` (the default) disables the temporal
    stride-2 patch embedding. When `pretrained` is True, loads the
    checkpoint registered under _MODELS["ViT-L/14_336"].
    """
    model = VisionTransformer(
        input_resolution=336,
        patch_size=14,
        width=1024,
        layers=24,
        heads=16,
        output_dim=768,
        use_checkpoint=use_checkpoint,
        checkpoint_num=checkpoint_num,
        t_size=t_size,
        dw_reduction=dw_reduction,
        backbone_drop_path_rate=backbone_drop_path_rate,
        # BUG FIX: VisionTransformer takes `temporal_downsample`, not
        # `no_temporal_downsample`; the original kwarg raised TypeError on
        # every call. Translate the negated flag here.
        temporal_downsample=not no_temporal_downsample,
        no_lmhra=no_lmhra,
        double_lmhra=double_lmhra,
        return_list=return_list,
        n_layers=n_layers,
        n_dim=n_dim,
        n_head=n_head,
        mlp_factor=mlp_factor,
        drop_path_rate=drop_path_rate,
        mlp_dropout=mlp_dropout,
        cls_dropout=cls_dropout,
        num_classes=num_classes,
    )
    if pretrained:
        print('load pretrained weights')
        state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu')
        load_state_dict(model, state_dict)
return model.eval() | null |
15,754 | import abc
from typing import Optional
import os
import requests
from PIL import Image
from io import BytesIO
import numpy as np
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as T
from torchvision.transforms.functional import InterpolationMode
from transformers import (
AutoTokenizer,
GenerationConfig,
StoppingCriteria,
StoppingCriteriaList,
Blip2VisionConfig
)
from .husky_src.husky_chat import Blip2LlaMAForConditionalGeneration
from .husky_src.load_ckpt import apply_delta
from .husky_src.conversation import (
conv_templates,
get_default_conv_template,
)
from .husky_src.compression import compress_module, replace_linear, compress_module_V2 ,decompress_module_V2
from .utils import prompts, gen_new_name
def get_gpu_memory(max_gpus=None):
    """Return a list with the free memory (GiB) of each visible CUDA device.

    Args:
        max_gpus: optional cap on how many devices to inspect; None means all.
    """
    gpu_memory = []
    num_gpus = (
        torch.cuda.device_count()
        if max_gpus is None
        else min(max_gpus, torch.cuda.device_count())
    )
    for gpu_id in range(num_gpus):
        with torch.cuda.device(gpu_id):
            device = torch.cuda.current_device()
            gpu_properties = torch.cuda.get_device_properties(device)
            total_memory = gpu_properties.total_memory / (1024 ** 3)
            # NOTE(review): memory_allocated() only counts this process's
            # allocations, so "available" ignores other processes on the GPU.
            allocated_memory = torch.cuda.memory_allocated() / (1024 ** 3)
            available_memory = total_memory - allocated_memory
            gpu_memory.append(available_memory)
return gpu_memory | null |
15,755 | import abc
from typing import Optional
import os
import requests
from PIL import Image
from io import BytesIO
import numpy as np
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as T
from torchvision.transforms.functional import InterpolationMode
from transformers import (
AutoTokenizer,
GenerationConfig,
StoppingCriteria,
StoppingCriteriaList,
Blip2VisionConfig
)
from .husky_src.husky_chat import Blip2LlaMAForConditionalGeneration
from .husky_src.load_ckpt import apply_delta
from .husky_src.conversation import (
conv_templates,
get_default_conv_template,
)
from .husky_src.compression import compress_module, replace_linear, compress_module_V2 ,decompress_module_V2
from .utils import prompts, gen_new_name
class Blip2LlaMAForConditionalGeneration(Blip2PreTrainedModel):
    """BLIP-2 style vision + Q-Former front-end wired to a LLaMA causal LM.

    Visual features are encoded by `vision_model`, compressed into
    `num_query_tokens` query embeddings by the Q-Former, projected into the
    language model's hidden space, and spliced into the prompt's input
    embeddings at a fixed token offset.
    """
    config_class = Blip2Config
    main_input_name = "pixel_values"

    def __init__(self, config: Blip2Config):
        super().__init__(config)
        self.vision_model = Blip2VisionModel(config.vision_config)
        self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size))
        self.qformer = Blip2QFormerModel(config.qformer_config)
        language_model = LlamaForCausalLM(config.text_config)
        self.language_model = language_model
        self.language_projection = nn.Linear(config.qformer_config.hidden_size, language_model.config.hidden_size)
        self.config.hidden_size = config.text_config.hidden_size
        self.num_queries = config.num_query_tokens
        # Index of the first <image> placeholder token in the prompt.
        # NOTE(review): hard-coded 5 assumes the prompt template always puts
        # exactly 5 tokens before the image span — confirm against the
        # tokenizer/template used by callers.
        self.offset = 5
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.language_model.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.language_model.set_input_embeddings(value)

    def set_output_embeddings(self, new_embeddings):
        self.language_model.set_output_embeddings(new_embeddings)

    def get_output_embeddings(self) -> nn.Module:
        return self.language_model.get_output_embeddings()

    def get_encoder(self):
        return self.language_model.get_encoder()

    def get_decoder(self):
        return self.language_model.get_decoder()

    def extract_feature(
        self,
        pixel_values: torch.FloatTensor,
    ):
        """Encode images and return the projected query embeddings
        (shape: [batch, num_queries, lm_hidden_size])."""
        image_embeds = self.vision_model(pixel_values, return_dict=True).last_hidden_state
        image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
        query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
        query_outputs = self.qformer(
            query_embeds=query_tokens,
            encoder_hidden_states=image_embeds,
            encoder_attention_mask=image_attention_mask,
            return_dict=True,
        )
        query_output = query_outputs.last_hidden_state
        language_model_inputs = self.language_projection(query_output)
        return language_model_inputs

    def _tie_weights(self):
        # Only relevant for encoder-decoder language models; decoder-only
        # LLaMA skips this.
        if not self.config.use_decoder_only_language_model:
            self.language_model.encoder.embed_tokens = self.language_model.shared
            self.language_model.decoder.embed_tokens = self.language_model.shared

    def _preprocess_accelerate(self):
        r"""
        Some pre-processing hacks to make the model `accelerate` compatible. Check
        https://github.com/huggingface/transformers/pull/21707 for more details.
        """
        hf_device_map = self.hf_device_map
        if len(hf_device_map) > 1 and "language_model" not in hf_device_map and torch.cuda.device_count() > 1:
            # warn users about unexpected behavior when using multi-GPU + BLIP-2 + `accelerate`.
            logger.warning(
                "The `language_model` is not in the `hf_device_map` dictionary and you are running your script"
                " in a multi-GPU environment. this may lead to unexpected behavior when using `accelerate`."
                " Please pass a `device_map` that contains `language_model` to remove this warning."
                " Please refer to https://github.com/huggingface/blog/blob/main/accelerate-large-models.md for",
                " more details on creating a `device_map` for large models.",
            )
        if hasattr(self.language_model, "_hf_hook"):
            self.language_model._hf_hook.io_same_device = True  # For `generate` compatibility

    def forward(
        self,
        pixel_values: torch.FloatTensor,
        input_ids: torch.FloatTensor,
        attention_mask: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.LongTensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, Blip2ForConditionalGenerationModelOutput]:
        """Run vision encoder -> Q-Former -> LLaMA; optionally compute the
        shifted causal-LM loss over `labels`."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # step 1: forward the images through the vision encoder,
        # to get image embeddings of shape (batch_size, seq_len, hidden_size)
        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        image_embeds = vision_outputs[0]
        # step 2: forward the query tokens through the QFormer, using the image embeddings for cross-attention
        image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
        query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
        query_outputs = self.qformer(
            query_embeds=query_tokens,
            encoder_hidden_states=image_embeds,
            encoder_attention_mask=image_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        query_output = query_outputs[0]
        # step 3: use the language model, conditioned on the query outputs and the prompt
        language_model_inputs = self.language_projection(query_output)
        assert language_model_inputs.shape[1] == self.num_queries
        inputs_embeds = self.language_model.get_input_embeddings()(input_ids)
        # Human: <img><IMAGE></img>. Give the describe Assistant:
        # position of <image>: [offset: offset+num_queries]
        # Overwrite the placeholder token embeddings with the visual queries.
        inputs_embeds[:, self.offset:self.offset + self.num_queries, :] = language_model_inputs
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        outputs = self.language_model(
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        logits = outputs.logits if return_dict else outputs[0]
        loss = None
        # we compute the loss here since we need to take into account the sequence length of the query embeds
        if labels is not None:
            logits = logits[:, -labels.size(1):, :]
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous().to(logits.device).to(torch.long)
            # Flatten the tokens
            loss_fct = nn.CrossEntropyLoss(reduction="mean")
            loss = loss_fct(shift_logits.view(-1, self.config.text_config.vocab_size), shift_labels.view(-1))
        if not return_dict:
            output = (logits, vision_outputs, query_outputs, outputs)
            return ((loss,) + output) if loss is not None else output
        return Blip2ForConditionalGenerationModelOutput(
            loss=loss,
            logits=logits,
            vision_outputs=vision_outputs,
            qformer_outputs=query_outputs,
            language_model_outputs=outputs,
        )

    def generate(
        self,
        pixel_values: torch.FloatTensor,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        language_model_inputs: Optional[torch.FloatTensor] = None,
        generation_config: Optional[GenerationConfig] = None,
        **generate_kwargs,
    ) -> torch.LongTensor:
        """
        Overrides `generate` function to be able to use the model as a conditional generator.
        Args:
            pixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width)):
                Input images to be processed.
            input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
                The sequence used as a prompt for the generation.
            attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
                Mask to avoid performing attention on padding token indices
            language_model_inputs (`torch.FloatTensor`, *optional*):
                Precomputed projected query embeddings (e.g. from
                `extract_feature`); when given, the vision encoder and
                Q-Former are skipped.
            generation_config (`~generation.GenerationConfig`, *optional*):
                The generation configuration to be used as base parametrization for the generation call. `**kwargs`
                passed to generate matching the attributes of `generation_config` will override them. If
                `generation_config` is not provided, the default will be used, which had the following loading
                priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
                configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
                default values, whose documentation should be checked to parameterize generation.
        Returns:
            captions (list): A list of strings of length batch_size * num_captions.
        """
        if hasattr(self, "hf_device_map"):
            # preprocess for `accelerate`
            self._preprocess_accelerate()
        if language_model_inputs is None:
            batch_size = pixel_values.shape[0]
            image_embeds = self.vision_model(pixel_values, return_dict=True).last_hidden_state
            image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
            query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
            query_outputs = self.qformer(
                query_embeds=query_tokens,
                encoder_hidden_states=image_embeds,
                encoder_attention_mask=image_attention_mask,
                return_dict=True,
            )
            query_output = query_outputs.last_hidden_state
            language_model_inputs = self.language_projection(query_output)
        assert language_model_inputs.shape[1] == self.num_queries
        if input_ids is None:
            # Fall back to a BOS-only prompt per image.
            input_ids = (
                torch.LongTensor([[self.config.text_config.bos_token_id]])
                .repeat(batch_size, 1)
                .to(image_embeds.device)
            )
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        inputs_embeds = self.language_model.get_input_embeddings()(input_ids)
        # position of <image>: [offset: offset+num_queries]
        inputs_embeds[:, self.offset:self.offset + self.num_queries, :] = language_model_inputs
        outputs = self.language_model.generate(
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            generation_config=generation_config,
            **generate_kwargs,
        )
        return outputs
def load_model(
    model_path, device, num_gpus, max_gpu_memory=None, load_8bit=False, debug=False
):
    """Load the Husky BLIP-2/LLaMA model and tokenizer from `model_path` in
    fp16 and put the model in eval mode.

    NOTE(review): `device`, `num_gpus`, `max_gpu_memory` and `load_8bit` are
    currently unused — the device-placement/8-bit paths are commented out
    below, so the model stays on CPU until the caller moves it.
    """
    kwargs = {"torch_dtype": torch.float16}
    tokenizer = AutoTokenizer.from_pretrained(
        model_path, use_fast=False)
    model = Blip2LlaMAForConditionalGeneration.from_pretrained(
        model_path, low_cpu_mem_usage=True, **kwargs
    )
    # if load_8bit:
    #     compress_module(model, device)
    # if (device == "cuda" and num_gpus == 1) or device == "mps":
    #     model.to(device)
    if debug:
        print(model)
    model = model.eval()
return model, tokenizer | null |
15,756 | import abc
from typing import Optional
import os
import requests
from PIL import Image
from io import BytesIO
import numpy as np
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as T
from torchvision.transforms.functional import InterpolationMode
from transformers import (
AutoTokenizer,
GenerationConfig,
StoppingCriteria,
StoppingCriteriaList,
Blip2VisionConfig
)
from .husky_src.husky_chat import Blip2LlaMAForConditionalGeneration
from .husky_src.load_ckpt import apply_delta
from .husky_src.conversation import (
conv_templates,
get_default_conv_template,
)
from .husky_src.compression import compress_module, replace_linear, compress_module_V2 ,decompress_module_V2
from .utils import prompts, gen_new_name
def build_transform(input_size):
    """Build the eval-time image transform: RGB-convert, resize (using the
    standard 224/256 crop percentage), center-crop to `input_size`, convert
    to tensor, and normalize with ImageNet mean/std."""
    crop_pct = 224 / 256
    # Resize so the subsequent center crop removes the usual border fraction.
    size = int(input_size / crop_pct)
    transform = T.Compose([
        T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
        T.Resize(size, interpolation=InterpolationMode.BICUBIC),
        T.CenterCrop(input_size),
        T.ToTensor(),
        T.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
    ])
return transform | null |
15,757 | import abc
from typing import Optional
import os
import requests
from PIL import Image
from io import BytesIO
import numpy as np
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as T
from torchvision.transforms.functional import InterpolationMode
from transformers import (
AutoTokenizer,
GenerationConfig,
StoppingCriteria,
StoppingCriteriaList,
Blip2VisionConfig
)
from .husky_src.husky_chat import Blip2LlaMAForConditionalGeneration
from .husky_src.load_ckpt import apply_delta
from .husky_src.conversation import (
conv_templates,
get_default_conv_template,
)
from .husky_src.compression import compress_module, replace_linear, compress_module_V2 ,decompress_module_V2
from .utils import prompts, gen_new_name
DEFAULT_IMAGE_TOKEN = "<ImageContent>"
DEFAULT_IMG_START_TOKEN = "<img>"
DEFAULT_IMG_END_TOKEN = "</img>"
def load_image(image_file):
    """Return the image at `image_file` as an RGB `PIL.Image`.

    `image_file` may be a local filesystem path or an http(s) URL (any
    string starting with 'http' is fetched remotely).
    """
    if image_file.startswith(('http', 'https')):
        payload = requests.get(image_file).content
        return Image.open(BytesIO(payload)).convert('RGB')
    return Image.open(image_file).convert('RGB')
class StoppingCriteriaSub(StoppingCriteria):
    """Stop generation when any of the given stop-token sequences appears at
    the end of the (single) generated sequence."""

    def __init__(self, stops, encounters=1):
        super().__init__()
        # `encounters` is accepted for API compatibility but not used.
        self.stops = stops

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs):
        # Only the first sequence in the batch is inspected.
        tail = input_ids[0]
        return any(
            torch.all(stop == tail[-len(stop):]).item()
            for stop in self.stops
        )
def generate_stream(
    model, tokenizer, image_processor, params, device
):
    """Run one image-conditioned generation and return the decoded strings.

    Args:
        model: Blip2LlaMAForConditionalGeneration-style model.
        tokenizer: matching text tokenizer.
        image_processor: callable mapping a PIL image to pixel values.
        params: dict with 'prompt', 'images' (single path/URL), and optional
            'temperature' / 'max_new_tokens'.
        device: device to move pixel values to.

    Raises:
        NotImplementedError: if no image is supplied (text-only is unsupported).
    """
    prompt = params["prompt"]
    images = params.get("images", None)
    temperature = float(params.get("temperature", 0.7))
    max_new_tokens = int(params.get("max_new_tokens", 1024))
    num_queries = model.config.num_query_tokens
    # Stop as soon as the model starts a new turn or a separator.
    stop_words = ["Human: ", "Assistant: ", "###", "\n\n"]
    stop_words_ids = [tokenizer(stop_word, return_tensors='pt')[
        'input_ids'].squeeze() for stop_word in stop_words]
    stopping_criteria = StoppingCriteriaList(
        [StoppingCriteriaSub(stops=stop_words_ids)])
    if images is not None:
        pixel_values = image_processor(load_image(images)).to(
            device)  # only support one image
        # Expand the single <ImageContent> placeholder to one token per query,
        # wrapped in <img>...</img>, matching the model's expected layout.
        image_query = DEFAULT_IMG_START_TOKEN + \
            DEFAULT_IMAGE_TOKEN * num_queries + DEFAULT_IMG_END_TOKEN
        prompt = prompt.replace(DEFAULT_IMAGE_TOKEN, image_query)
        model_inputs = tokenizer([prompt], return_tensors="pt")
        model_inputs["pixel_values"] = pixel_values
        model_inputs.pop("token_type_ids", None)
    else:
        raise NotImplementedError
    generation_config = GenerationConfig(
        bos_token_id=1,
        do_sample=True,
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        stopping_criteria=stopping_criteria
    )
    generation_output = model.generate(
        **model_inputs,
        generation_config=generation_config,
        return_dict_in_generate=True,
        output_scores=True
    )
    preds = generation_output.sequences
    outputs = tokenizer.batch_decode(preds, skip_special_tokens=True)
return outputs | null |
15,758 | import abc
from typing import Optional
import os
import requests
from PIL import Image
from io import BytesIO
import numpy as np
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as T
from torchvision.transforms.functional import InterpolationMode
from transformers import (
AutoTokenizer,
GenerationConfig,
StoppingCriteria,
StoppingCriteriaList,
Blip2VisionConfig
)
from .husky_src.husky_chat import Blip2LlaMAForConditionalGeneration
from .husky_src.load_ckpt import apply_delta
from .husky_src.conversation import (
conv_templates,
get_default_conv_template,
)
from .husky_src.compression import compress_module, replace_linear, compress_module_V2 ,decompress_module_V2
from .utils import prompts, gen_new_name
def resize_pos_embed(posemb, posemb_new, num_prefix_tokens=1, gs_new=()):
    # Rescale the grid of position embeddings when loading from state_dict.
    """Bicubically resize a ViT positional-embedding table to a new grid.

    Args:
        posemb: source embeddings, shape (1, num_prefix + gs_old**2, dim).
        posemb_new: target embeddings (only its token count is used).
        num_prefix_tokens: number of non-grid tokens (e.g. the class token)
            kept unchanged at the front.
        gs_new: target (h, w) grid; defaults to a square inferred from
            posemb_new's token count.
    """
    ntok_new = posemb_new.shape[1]
    if num_prefix_tokens:
        # Split off the prefix tokens; only the grid part is interpolated.
        posemb_prefix, posemb_grid = posemb[:, :num_prefix_tokens], posemb[0, num_prefix_tokens:]
        ntok_new -= num_prefix_tokens
    else:
        posemb_prefix, posemb_grid = posemb[:, :0], posemb[0]
    # Source grid is assumed square.
    gs_old = int(math.sqrt(len(posemb_grid)))
    if not len(gs_new):  # backwards compatibility
        gs_new = [int(math.sqrt(ntok_new))] * 2
    assert len(gs_new) >= 2
    # Tokens -> (1, dim, gs_old, gs_old) image layout for F.interpolate.
    posemb_grid = posemb_grid.reshape(
        1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
    posemb_grid = F.interpolate(
        posemb_grid, size=gs_new, mode='bicubic', align_corners=False)
    # Back to token layout and re-attach the prefix tokens.
    posemb_grid = posemb_grid.permute(
        0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1)
    posemb = torch.cat([posemb_prefix, posemb_grid], dim=1)
return posemb | null |
15,759 | import abc
from typing import Optional
import os
import requests
from PIL import Image
from io import BytesIO
import numpy as np
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as T
from torchvision.transforms.functional import InterpolationMode
from transformers import (
AutoTokenizer,
GenerationConfig,
StoppingCriteria,
StoppingCriteriaList,
Blip2VisionConfig
)
from .husky_src.husky_chat import Blip2LlaMAForConditionalGeneration
from .husky_src.load_ckpt import apply_delta
from .husky_src.conversation import (
conv_templates,
get_default_conv_template,
)
from .husky_src.compression import compress_module, replace_linear, compress_module_V2 ,decompress_module_V2
from .utils import prompts, gen_new_name
def apply_delta(base_model_path, target_model_path, delta_path):
    """Reconstruct target weights by adding base-LLM weights to a delta
    checkpoint, then save model + tokenizer to `target_model_path`.

    Only parameters under the `language_model.` prefix receive the base
    weights; vision/Q-Former parameters in the delta are kept as-is.
    """
    print("Loading base model")
    base = AutoModelForCausalLM.from_pretrained(base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    print("Loading delta")
    delta_tokenizer = AutoTokenizer.from_pretrained(delta_path, use_fast=False)
    delta = Blip2LlaMAForConditionalGeneration.from_pretrained(delta_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    print("Applying delta")
    for name, param in tqdm(delta.state_dict().items(), desc="Applying delta"):
        if name.startswith('language_model'):
            # Strip the wrapper prefix to index into the base LLM state dict.
            name = name[len('language_model.'):]
            if param.data.shape == base.state_dict()[name].shape:
                # In-place add mutates the delta model's own parameters.
                param.data += base.state_dict()[name]
            else:
                # Shapes differ (e.g. extended vocabulary): add the base
                # weights into the overlapping top-left sub-block only.
                bparam = base.state_dict()[name]
                param.data[:bparam.shape[0], :bparam.shape[1]] += bparam
        else:
            pass
    print("Saving target model")
    delta.save_pretrained(target_model_path)
    delta_tokenizer.save_pretrained(target_model_path)
def write_model(model_path, input_base_path, model_size):
    """Convert a Meta LLaMA checkpoint (consolidated .pth shards) into the
    Hugging Face Transformers format under `model_path`.

    Writes per-layer shard files plus an index into `model_path/tmp`, then
    reloads them as a `LlamaForCausalLM` and saves the final model, removing
    the temporary directory.
    """
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)
    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    # Rotary-embedding inverse frequencies (theta = 10000).
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))

    # permute for sliced rotary
    def permute(w):
        return w.view(n_heads, dim // n_heads // 2, 2, dim).transpose(1, 2).reshape(dim, dim)

    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    # One output shard per transformer layer; the final shard (below) holds
    # embeddings, final norm and the LM head.
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that in the 13B checkpoint, not cloning the two following weights will result in the checkpoint
            # becoming 37GB instead of 26GB for some reason.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            # q/k are concatenated across shards on the head axis, then
            # permuted for HF's sliced-rotary layout.
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(n_heads_per_shard, dims_per_head, dim)
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(dim, dim)
            # o_proj / down_proj are row-parallel (concat on dim=1); the MLP
            # gate/up projections are column-parallel (concat on dim=0).
            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )
        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))
    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }
    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))
    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}  # 2 bytes/param (fp16)
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
    )
    config.save_pretrained(tmp_model_path)
    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()
    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path
    print("Saving in the Transformers format.")
    model.save_pretrained(model_path)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    """Build a LLaMA tokenizer from a sentencepiece model and save it in HF format.

    Prefers the fast tokenizer implementation when it is available.
    """
    if LlamaTokenizerFast is None:
        tokenizer_class = LlamaTokenizer
    else:
        tokenizer_class = LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer_class(input_tokenizer_path).save_pretrained(tokenizer_path)
def download_if_not_exists(base_path, delta_path, new_path):
    """Ensure a delta-applied LLaMA checkpoint exists at ``new_path``.

    Pipeline: download the raw LLaMA weights if missing, convert the 7B
    shard to HuggingFace format if missing, then apply the delta weights.
    No-op if ``new_path`` already exists.
    """
    if os.path.exists(new_path):
        return
    if not os.path.exists(base_path):
        # download if not exists
        # NOTE(review): relies on an external shell script; assumes it writes into base_path — confirm.
        os.system('bash third-party/llama_download.sh')
    output_dir = os.path.join(os.path.dirname(base_path), 'llama_7B_hf')
    if not os.path.exists(output_dir):
        # convert to hf format if not exists
        from .husky_src.convert_llama_weights_to_hf import write_model, write_tokenizer
        write_model(
            model_path=output_dir,
            input_base_path=os.path.join(base_path, '7B'),
            model_size="7B",
        )
        spm_path = os.path.join(base_path, "tokenizer.model")
        write_tokenizer(output_dir, spm_path)
    # Apply the delta on top of the converted base model to produce new_path.
    apply_delta(output_dir, new_path, delta_path)
15,761 | import logging
import os
from fvcore.common.timer import Timer
from detectron2.structures import BoxMode
from fvcore.common.file_io import PathManager
from detectron2.data import DatasetCatalog, MetadataCatalog
from lvis import LVIS
def load_GRiTcoco_json(json_file, image_root, dataset_name=None):
    """Load COCO-format annotations for GRiT.

    NOTE(review): the implementation is missing in this snippet — the ``def``
    had no body, which is a SyntaxError. Fail loudly until the real loader
    is restored rather than silently registering a broken dataset.
    """
    raise NotImplementedError(
        "load_GRiTcoco_json body is missing from this source snippet"
    )
def register_GRiTcoco_instances(name, metadata, json_file, image_root):
    """
    Register a COCO-format dataset (GRiT flavor) under ``name``.

    The annotations are loaded lazily via ``load_GRiTcoco_json``; the
    json/image paths plus a "coco" evaluator type are attached as metadata.
    """
    DatasetCatalog.register(name, lambda: load_GRiTcoco_json(
        json_file, image_root, name))
    MetadataCatalog.get(name).set(
        json_file=json_file, image_root=image_root,
        evaluator_type="coco", **metadata
    )
15,765 | import logging
import os
from fvcore.common.timer import Timer
from detectron2.structures import BoxMode
from fvcore.common.file_io import PathManager
from detectron2.data import DatasetCatalog, MetadataCatalog
from lvis import LVIS
def load_o365_json(json_file, image_root, dataset_name=None):
    '''
    Load Object365 class name text for object description for GRiT.

    Returns a list of detectron2-style dataset dicts where every annotation
    has category_id 0 and carries the class name as "object_description".
    '''
    # BUGFIX: `logger` was referenced below but never defined in this module.
    logger = logging.getLogger(__name__)
    json_file = PathManager.get_local_path(json_file)
    timer = Timer()
    lvis_api = LVIS(json_file)
    if timer.seconds() > 1:
        logger.info("Loading {} takes {:.2f} seconds.".format(
            json_file, timer.seconds()))
    # Map category id -> human-readable name; "a/b/c" becomes "a b c".
    class_names = {}
    sort_cat = sorted(lvis_api.dataset['categories'], key=lambda x: x['id'])
    for x in sort_cat:
        if '/' in x['name']:
            text = ' '.join(x['name'].split('/'))
        else:
            text = x['name']
        class_names[x['id']] = text
    img_ids = sorted(lvis_api.imgs.keys())
    imgs = lvis_api.load_imgs(img_ids)
    anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids]
    ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
    assert len(set(ann_ids)) == len(ann_ids), \
        "Annotation ids in '{}' are not unique".format(json_file)
    imgs_anns = list(zip(imgs, anns))
    logger.info("Loaded {} images in the LVIS v1 format from {}".format(
        len(imgs_anns), json_file))
    dataset_dicts = []
    for (img_dict, anno_dict_list) in imgs_anns:
        record = {}
        if "file_name" in img_dict:
            file_name = img_dict["file_name"]
            record["file_name"] = os.path.join(image_root, file_name)
        record["height"] = int(img_dict["height"])
        record["width"] = int(img_dict["width"])
        image_id = record["image_id"] = img_dict["id"]
        objs = []
        for anno in anno_dict_list:
            assert anno["image_id"] == image_id
            # Crowd annotations are not useful for dense captioning.
            if anno.get('iscrowd', 0) > 0:
                continue
            obj = {"bbox": anno["bbox"], "bbox_mode": BoxMode.XYWH_ABS}
            # All objects share one class; the text name is the supervision signal.
            obj["category_id"] = 0
            obj["object_description"] = class_names[anno['category_id']]
            objs.append(obj)
        record["annotations"] = objs
        # Skip images with no usable annotations.
        if len(record["annotations"]) == 0:
            continue
        record["task"] = "ObjectDet"
        dataset_dicts.append(record)
    return dataset_dicts
def register_o365_instances(name, metadata, json_file, image_root):
    """Register an Object365 dataset under ``name`` with LVIS-style evaluation."""
    def _load():
        return load_o365_json(json_file, image_root, name)

    DatasetCatalog.register(name, _load)
    MetadataCatalog.get(name).set(
        json_file=json_file,
        image_root=image_root,
        evaluator_type="lvis",
        **metadata,
    )
15,773 | import logging
import math
import fvcore.nn.weight_init as weight_init
import torch
import torch.nn as nn
from functools import partial
from detectron2.layers import CNNBlockBase, Conv2d, get_norm
from detectron2.modeling.backbone.build import BACKBONE_REGISTRY
from detectron2.layers import ShapeSpec
from centernet.modeling.backbone.fpn_p5 import LastLevelP6P7_P5
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, Mlp, trunc_normal_
from detectron2.modeling.backbone.backbone import Backbone
from .utils import (
PatchEmbed,
add_decomposed_rel_pos,
get_abs_pos,
window_partition,
window_unpartition,
)
class ViT(Backbone):
    """
    This module implements Vision Transformer (ViT) backbone in :paper:`vitdet`.
    "Exploring Plain Vision Transformer Backbones for Object Detection",
    https://arxiv.org/abs/2203.16527
    """
    def __init__(
        self,
        img_size=1024,
        patch_size=16,
        in_chans=3,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        drop_path_rate=0.0,
        norm_layer=nn.LayerNorm,
        act_layer=nn.GELU,
        use_abs_pos=True,
        use_rel_pos=False,
        rel_pos_zero_init=True,
        window_size=0,
        window_block_indexes=(),
        residual_block_indexes=(),
        use_act_checkpoint=True,
        pretrain_img_size=224,
        pretrain_use_cls_token=True,
        out_feature="last_feat",
    ):
        """
        Args:
            img_size (int): Input image size.
            patch_size (int): Patch size.
            in_chans (int): Number of input image channels.
            embed_dim (int): Patch embedding dimension.
            depth (int): Depth of ViT.
            num_heads (int): Number of attention heads in each ViT block.
            mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
            qkv_bias (bool): If True, add a learnable bias to query, key, value.
            drop_path_rate (float): Stochastic depth rate.
            norm_layer (nn.Module): Normalization layer.
            act_layer (nn.Module): Activation layer.
            use_abs_pos (bool): If True, use absolute positional embeddings.
            use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
            rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
            window_size (int): Window size for window attention blocks.
            window_block_indexes (list): Indexes for blocks using window attention.
            residual_block_indexes (list): Indexes for blocks using conv propagation.
            use_act_checkpoint (bool): If True, use activation checkpointing.
            pretrain_img_size (int): input image size for pretraining models.
            pretrain_use_cls_token (bool): If True, pretrainig models use class token.
            out_feature (str): name of the feature from the last block.
        """
        super().__init__()
        self.pretrain_use_cls_token = pretrain_use_cls_token
        self.use_act_checkpoint = use_act_checkpoint
        self.patch_embed = PatchEmbed(
            kernel_size=(patch_size, patch_size),
            stride=(patch_size, patch_size),
            in_chans=in_chans,
            embed_dim=embed_dim,
        )
        if use_abs_pos:
            # Initialize absolute positional embedding with pretrain image size.
            # The embedding is later resized by get_abs_pos() to the runtime grid.
            num_patches = (pretrain_img_size // patch_size) * (pretrain_img_size // patch_size)
            num_positions = (num_patches + 1) if pretrain_use_cls_token else num_patches
            self.pos_embed = nn.Parameter(torch.zeros(1, num_positions, embed_dim))
        else:
            self.pos_embed = None
        # stochastic depth decay rule: drop-path rate grows linearly with depth
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
        self.blocks = nn.ModuleList()
        for i in range(depth):
            block = Block(
                dim=embed_dim,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                drop_path=dpr[i],
                norm_layer=norm_layer,
                act_layer=act_layer,
                use_rel_pos=use_rel_pos,
                rel_pos_zero_init=rel_pos_zero_init,
                # window_size == 0 means this block uses global attention
                window_size=window_size if i in window_block_indexes else 0,
                use_residual_block=i in residual_block_indexes,
                input_size=(img_size // patch_size, img_size // patch_size),
            )
            self.blocks.append(block)
        self._out_feature_channels = {out_feature: embed_dim}
        self._out_feature_strides = {out_feature: patch_size}
        self._out_features = [out_feature]
        if self.pos_embed is not None:
            trunc_normal_(self.pos_embed, std=0.02)
        self.apply(self._init_weights)
    def _init_weights(self, m):
        # ViT-style init: truncated-normal linear weights, zero biases, unit LayerNorm scale.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    def forward(self, x):
        # PatchEmbed produces a (B, H, W, C) feature map.
        x = self.patch_embed(x)
        if self.pos_embed is not None:
            x = x + get_abs_pos(
                self.pos_embed, self.pretrain_use_cls_token, (x.shape[1], x.shape[2])
            )
        for blk in self.blocks:
            if self.use_act_checkpoint:
                # Activation checkpointing: recompute the block in backward to save memory.
                x = checkpoint.checkpoint(blk, x)
            else:
                x = blk(x)
        # (B, H, W, C) -> (B, C, H, W) for downstream conv/FPN consumers.
        return x.permute(0, 3, 1, 2)
class ViT_FPN(Backbone):
    """Simple feature pyramid on top of a single-scale ViT feature map.

    Builds stride-8 (transposed conv), stride-16 (identity scale) and
    stride-32 (max-pool) maps from the ViT output, then lets ``top_block``
    append the two coarsest levels (p6, p7).
    """
    def __init__(self, bottom_up=None, top_block=None, out_channels=None, strides=None, vit_out_dim=None):
        super(ViT_FPN, self).__init__()
        assert isinstance(bottom_up, Backbone)
        self.bottom_up = bottom_up
        self.top_block = top_block
        # Name levels "p3".."p7" by log2 of their stride.
        self._out_feature_strides = {"p{}".format(int(math.log2(s))): s for s in strides}
        self._out_features = list(self._out_feature_strides.keys())
        self._out_feature_channels = {k: out_channels for k in self._out_features}
        self._size_divisibility = strides[2]
        self.maxpool = nn.MaxPool2d(2, stride=2)
        # 2x upsampling from the ViT's stride-16 map to the stride-8 level.
        self.fpn_stride_16_8 = nn.ConvTranspose2d(vit_out_dim, vit_out_dim, 2, stride=2, bias=False)
        self.fpn_stride8_conv1 = nn.Conv2d(in_channels=vit_out_dim, out_channels=out_channels, kernel_size=1, bias=False)
        self.fpn_stride8_norm1 = nn.LayerNorm(out_channels)
        self.fpn_stride8_conv2 = nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.fpn_stride8_norm2 = nn.LayerNorm(out_channels)
        self.fpn_stride16_conv1 = nn.Conv2d(in_channels=vit_out_dim, out_channels=out_channels, kernel_size=1, bias=False)
        self.fpn_stride16_norm1 = nn.LayerNorm(out_channels)
        self.fpn_stride16_conv2 = nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.fpn_stride16_norm2 = nn.LayerNorm(out_channels)
        self.fpn_stride32_conv1 = nn.Conv2d(in_channels=vit_out_dim, out_channels=out_channels, kernel_size=1, bias=False)
        self.fpn_stride32_norm1 = nn.LayerNorm(out_channels)
        self.fpn_stride32_conv2 = nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.fpn_stride32_norm2 = nn.LayerNorm(out_channels)
    def forward(self, x):
        # The permute pairs convert NCHW -> NHWC because nn.LayerNorm
        # normalizes over the last dimension, then back to NCHW.
        vit_output_featuremap = self.bottom_up(x)
        stride8_feature = self.fpn_stride_16_8(vit_output_featuremap)
        stride8_feature = self.fpn_stride8_norm1(self.fpn_stride8_conv1(stride8_feature)
                                                .permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        stride8_feature = self.fpn_stride8_norm2(self.fpn_stride8_conv2(stride8_feature)
                                                .permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        stride32_feature = self.maxpool(vit_output_featuremap)
        stride32_feature = self.fpn_stride32_norm1(self.fpn_stride32_conv1(stride32_feature)
                                                   .permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        stride32_feature = self.fpn_stride32_norm2(self.fpn_stride32_conv2(stride32_feature)
                                                   .permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        stride16_feature = self.fpn_stride16_norm1(self.fpn_stride16_conv1(vit_output_featuremap).
                                                   permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        stride16_feature = self.fpn_stride16_norm2(self.fpn_stride16_conv2(stride16_feature)
                                                   .permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        results = [stride8_feature, stride16_feature, stride32_feature]
        # top_block appends the p6/p7 levels computed from the stride-32 map.
        results.extend(self.top_block(stride32_feature))
        assert len(self._out_features) == len(results)
        fpn_out = {f: res for f, res in zip(self._out_features, results)}
        return fpn_out
    # NOTE(review): detectron2's Backbone declares size_divisibility as a
    # @property; here it is a plain method — confirm callers invoke it as one.
    def size_divisibility(self):
        return self._size_divisibility
    def output_shape(self):
        return {
            name: ShapeSpec(
                channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
            )
            for name in self._out_features
        }
class LastLevelP6P7_P5(nn.Module):
    """
    This module is used in RetinaNet to generate extra layers, P6 and P7 from
    C5 feature, via two stride-2 3x3 convolutions (ReLU between them).
    """
    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.num_levels = 2
        self.in_feature = "p5"
        self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1)
        self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1)
        for module in [self.p6, self.p7]:
            weight_init.c2_xavier_fill(module)
    def forward(self, c5):
        """Return [p6, p7], each halving the spatial size of its input."""
        p6 = self.p6(c5)
        # BUGFIX: `F` is not imported in this module; use nn.functional instead.
        p7 = self.p7(nn.functional.relu(p6))
        return [p6, p7]
def build_vit_fpn_backbone(cfg, input_shape: ShapeSpec):
    """Build a ViT-Base (single-scale) backbone with a simple FPN for GRiT."""
    embed_dim = 768
    vit_out_dim = embed_dim
    # Blocks 2, 5, 8 and 11 use global attention; all others use 14x14 windows.
    window_block_indexes = [i for i in range(12) if i % 3 != 2]
    bottom_up = ViT(  # Single-scale ViT backbone
        img_size=1024,
        patch_size=16,
        embed_dim=embed_dim,
        depth=12,
        num_heads=12,
        drop_path_rate=0.1,
        window_size=14,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        window_block_indexes=window_block_indexes,
        residual_block_indexes=[],
        use_act_checkpoint=cfg.USE_ACT_CHECKPOINT,
        use_rel_pos=True,
        out_feature="last_feat",
    )
    out_channels = cfg.MODEL.FPN.OUT_CHANNELS
    assert out_channels in (256, 768, 1024)
    return ViT_FPN(
        bottom_up=bottom_up,
        top_block=LastLevelP6P7_P5(out_channels, out_channels),
        out_channels=out_channels,
        strides=[8, 16, 32, 64, 128],
        vit_out_dim=vit_out_dim,
    )
15,774 | import logging
import math
import fvcore.nn.weight_init as weight_init
import torch
import torch.nn as nn
from functools import partial
from detectron2.layers import CNNBlockBase, Conv2d, get_norm
from detectron2.modeling.backbone.build import BACKBONE_REGISTRY
from detectron2.layers import ShapeSpec
from centernet.modeling.backbone.fpn_p5 import LastLevelP6P7_P5
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, Mlp, trunc_normal_
from detectron2.modeling.backbone.backbone import Backbone
from .utils import (
PatchEmbed,
add_decomposed_rel_pos,
get_abs_pos,
window_partition,
window_unpartition,
)
class ViT(Backbone):
    """
    This module implements Vision Transformer (ViT) backbone in :paper:`vitdet`.
    "Exploring Plain Vision Transformer Backbones for Object Detection",
    https://arxiv.org/abs/2203.16527
    """
    def __init__(
        self,
        img_size=1024,
        patch_size=16,
        in_chans=3,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        drop_path_rate=0.0,
        norm_layer=nn.LayerNorm,
        act_layer=nn.GELU,
        use_abs_pos=True,
        use_rel_pos=False,
        rel_pos_zero_init=True,
        window_size=0,
        window_block_indexes=(),
        residual_block_indexes=(),
        use_act_checkpoint=True,
        pretrain_img_size=224,
        pretrain_use_cls_token=True,
        out_feature="last_feat",
    ):
        """
        Args:
            img_size (int): Input image size.
            patch_size (int): Patch size.
            in_chans (int): Number of input image channels.
            embed_dim (int): Patch embedding dimension.
            depth (int): Depth of ViT.
            num_heads (int): Number of attention heads in each ViT block.
            mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
            qkv_bias (bool): If True, add a learnable bias to query, key, value.
            drop_path_rate (float): Stochastic depth rate.
            norm_layer (nn.Module): Normalization layer.
            act_layer (nn.Module): Activation layer.
            use_abs_pos (bool): If True, use absolute positional embeddings.
            use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
            rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
            window_size (int): Window size for window attention blocks.
            window_block_indexes (list): Indexes for blocks using window attention.
            residual_block_indexes (list): Indexes for blocks using conv propagation.
            use_act_checkpoint (bool): If True, use activation checkpointing.
            pretrain_img_size (int): input image size for pretraining models.
            pretrain_use_cls_token (bool): If True, pretrainig models use class token.
            out_feature (str): name of the feature from the last block.
        """
        super().__init__()
        self.pretrain_use_cls_token = pretrain_use_cls_token
        self.use_act_checkpoint = use_act_checkpoint
        self.patch_embed = PatchEmbed(
            kernel_size=(patch_size, patch_size),
            stride=(patch_size, patch_size),
            in_chans=in_chans,
            embed_dim=embed_dim,
        )
        if use_abs_pos:
            # Initialize absolute positional embedding with pretrain image size.
            # The embedding is later resized by get_abs_pos() to the runtime grid.
            num_patches = (pretrain_img_size // patch_size) * (pretrain_img_size // patch_size)
            num_positions = (num_patches + 1) if pretrain_use_cls_token else num_patches
            self.pos_embed = nn.Parameter(torch.zeros(1, num_positions, embed_dim))
        else:
            self.pos_embed = None
        # stochastic depth decay rule: drop-path rate grows linearly with depth
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
        self.blocks = nn.ModuleList()
        for i in range(depth):
            block = Block(
                dim=embed_dim,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                drop_path=dpr[i],
                norm_layer=norm_layer,
                act_layer=act_layer,
                use_rel_pos=use_rel_pos,
                rel_pos_zero_init=rel_pos_zero_init,
                # window_size == 0 means this block uses global attention
                window_size=window_size if i in window_block_indexes else 0,
                use_residual_block=i in residual_block_indexes,
                input_size=(img_size // patch_size, img_size // patch_size),
            )
            self.blocks.append(block)
        self._out_feature_channels = {out_feature: embed_dim}
        self._out_feature_strides = {out_feature: patch_size}
        self._out_features = [out_feature]
        if self.pos_embed is not None:
            trunc_normal_(self.pos_embed, std=0.02)
        self.apply(self._init_weights)
    def _init_weights(self, m):
        # ViT-style init: truncated-normal linear weights, zero biases, unit LayerNorm scale.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    def forward(self, x):
        # PatchEmbed produces a (B, H, W, C) feature map.
        x = self.patch_embed(x)
        if self.pos_embed is not None:
            x = x + get_abs_pos(
                self.pos_embed, self.pretrain_use_cls_token, (x.shape[1], x.shape[2])
            )
        for blk in self.blocks:
            if self.use_act_checkpoint:
                # Activation checkpointing: recompute the block in backward to save memory.
                x = checkpoint.checkpoint(blk, x)
            else:
                x = blk(x)
        # (B, H, W, C) -> (B, C, H, W) for downstream conv/FPN consumers.
        return x.permute(0, 3, 1, 2)
class ViT_FPN(Backbone):
    """Simple feature pyramid on top of a single-scale ViT feature map.

    Builds stride-8 (transposed conv), stride-16 (identity scale) and
    stride-32 (max-pool) maps from the ViT output, then lets ``top_block``
    append the two coarsest levels (p6, p7).
    """
    def __init__(self, bottom_up=None, top_block=None, out_channels=None, strides=None, vit_out_dim=None):
        super(ViT_FPN, self).__init__()
        assert isinstance(bottom_up, Backbone)
        self.bottom_up = bottom_up
        self.top_block = top_block
        # Name levels "p3".."p7" by log2 of their stride.
        self._out_feature_strides = {"p{}".format(int(math.log2(s))): s for s in strides}
        self._out_features = list(self._out_feature_strides.keys())
        self._out_feature_channels = {k: out_channels for k in self._out_features}
        self._size_divisibility = strides[2]
        self.maxpool = nn.MaxPool2d(2, stride=2)
        # 2x upsampling from the ViT's stride-16 map to the stride-8 level.
        self.fpn_stride_16_8 = nn.ConvTranspose2d(vit_out_dim, vit_out_dim, 2, stride=2, bias=False)
        self.fpn_stride8_conv1 = nn.Conv2d(in_channels=vit_out_dim, out_channels=out_channels, kernel_size=1, bias=False)
        self.fpn_stride8_norm1 = nn.LayerNorm(out_channels)
        self.fpn_stride8_conv2 = nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.fpn_stride8_norm2 = nn.LayerNorm(out_channels)
        self.fpn_stride16_conv1 = nn.Conv2d(in_channels=vit_out_dim, out_channels=out_channels, kernel_size=1, bias=False)
        self.fpn_stride16_norm1 = nn.LayerNorm(out_channels)
        self.fpn_stride16_conv2 = nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.fpn_stride16_norm2 = nn.LayerNorm(out_channels)
        self.fpn_stride32_conv1 = nn.Conv2d(in_channels=vit_out_dim, out_channels=out_channels, kernel_size=1, bias=False)
        self.fpn_stride32_norm1 = nn.LayerNorm(out_channels)
        self.fpn_stride32_conv2 = nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.fpn_stride32_norm2 = nn.LayerNorm(out_channels)
    def forward(self, x):
        # The permute pairs convert NCHW -> NHWC because nn.LayerNorm
        # normalizes over the last dimension, then back to NCHW.
        vit_output_featuremap = self.bottom_up(x)
        stride8_feature = self.fpn_stride_16_8(vit_output_featuremap)
        stride8_feature = self.fpn_stride8_norm1(self.fpn_stride8_conv1(stride8_feature)
                                                .permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        stride8_feature = self.fpn_stride8_norm2(self.fpn_stride8_conv2(stride8_feature)
                                                .permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        stride32_feature = self.maxpool(vit_output_featuremap)
        stride32_feature = self.fpn_stride32_norm1(self.fpn_stride32_conv1(stride32_feature)
                                                   .permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        stride32_feature = self.fpn_stride32_norm2(self.fpn_stride32_conv2(stride32_feature)
                                                   .permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        stride16_feature = self.fpn_stride16_norm1(self.fpn_stride16_conv1(vit_output_featuremap).
                                                   permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        stride16_feature = self.fpn_stride16_norm2(self.fpn_stride16_conv2(stride16_feature)
                                                   .permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        results = [stride8_feature, stride16_feature, stride32_feature]
        # top_block appends the p6/p7 levels computed from the stride-32 map.
        results.extend(self.top_block(stride32_feature))
        assert len(self._out_features) == len(results)
        fpn_out = {f: res for f, res in zip(self._out_features, results)}
        return fpn_out
    # NOTE(review): detectron2's Backbone declares size_divisibility as a
    # @property; here it is a plain method — confirm callers invoke it as one.
    def size_divisibility(self):
        return self._size_divisibility
    def output_shape(self):
        return {
            name: ShapeSpec(
                channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
            )
            for name in self._out_features
        }
class LastLevelP6P7_P5(nn.Module):
    """
    This module is used in RetinaNet to generate extra layers, P6 and P7 from
    C5 feature, via two stride-2 3x3 convolutions (ReLU between them).
    """
    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.num_levels = 2
        self.in_feature = "p5"
        self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1)
        self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1)
        for module in [self.p6, self.p7]:
            weight_init.c2_xavier_fill(module)
    def forward(self, c5):
        """Return [p6, p7], each halving the spatial size of its input."""
        p6 = self.p6(c5)
        # BUGFIX: `F` is not imported in this module; use nn.functional instead.
        p7 = self.p7(nn.functional.relu(p6))
        return [p6, p7]
def build_vit_fpn_backbone_large(cfg, input_shape: ShapeSpec):
    """Build a ViT-Large (single-scale) backbone with a simple FPN for GRiT."""
    # Every 6th block (5, 11, 17, 23) uses global attention; the rest are windowed.
    window_block_indexes = [i for i in range(24) if i % 6 != 5]
    embed_dim = 1024
    vit_out_dim = embed_dim
    bottom_up = ViT(  # Single-scale ViT backbone
        img_size=1024,
        patch_size=16,
        embed_dim=embed_dim,
        depth=24,
        num_heads=16,
        drop_path_rate=0.4,
        window_size=14,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        window_block_indexes=window_block_indexes,
        residual_block_indexes=[],
        use_act_checkpoint=cfg.USE_ACT_CHECKPOINT,
        use_rel_pos=True,
        out_feature="last_feat",
    )
    out_channels = cfg.MODEL.FPN.OUT_CHANNELS
    assert out_channels in (256, 768, 1024)
    return ViT_FPN(
        bottom_up=bottom_up,
        top_block=LastLevelP6P7_P5(out_channels, out_channels),
        out_channels=out_channels,
        strides=[8, 16, 32, 64, 128],
        vit_out_dim=vit_out_dim,
    )
15,775 | import logging
import math
import fvcore.nn.weight_init as weight_init
import torch
import torch.nn as nn
from functools import partial
from detectron2.layers import CNNBlockBase, Conv2d, get_norm
from detectron2.modeling.backbone.build import BACKBONE_REGISTRY
from detectron2.layers import ShapeSpec
from centernet.modeling.backbone.fpn_p5 import LastLevelP6P7_P5
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, Mlp, trunc_normal_
from detectron2.modeling.backbone.backbone import Backbone
from .utils import (
PatchEmbed,
add_decomposed_rel_pos,
get_abs_pos,
window_partition,
window_unpartition,
)
class ViT(Backbone):
    """
    This module implements Vision Transformer (ViT) backbone in :paper:`vitdet`.
    "Exploring Plain Vision Transformer Backbones for Object Detection",
    https://arxiv.org/abs/2203.16527
    """
    def __init__(
        self,
        img_size=1024,
        patch_size=16,
        in_chans=3,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        drop_path_rate=0.0,
        norm_layer=nn.LayerNorm,
        act_layer=nn.GELU,
        use_abs_pos=True,
        use_rel_pos=False,
        rel_pos_zero_init=True,
        window_size=0,
        window_block_indexes=(),
        residual_block_indexes=(),
        use_act_checkpoint=True,
        pretrain_img_size=224,
        pretrain_use_cls_token=True,
        out_feature="last_feat",
    ):
        """
        Args:
            img_size (int): Input image size.
            patch_size (int): Patch size.
            in_chans (int): Number of input image channels.
            embed_dim (int): Patch embedding dimension.
            depth (int): Depth of ViT.
            num_heads (int): Number of attention heads in each ViT block.
            mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
            qkv_bias (bool): If True, add a learnable bias to query, key, value.
            drop_path_rate (float): Stochastic depth rate.
            norm_layer (nn.Module): Normalization layer.
            act_layer (nn.Module): Activation layer.
            use_abs_pos (bool): If True, use absolute positional embeddings.
            use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
            rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
            window_size (int): Window size for window attention blocks.
            window_block_indexes (list): Indexes for blocks using window attention.
            residual_block_indexes (list): Indexes for blocks using conv propagation.
            use_act_checkpoint (bool): If True, use activation checkpointing.
            pretrain_img_size (int): input image size for pretraining models.
            pretrain_use_cls_token (bool): If True, pretrainig models use class token.
            out_feature (str): name of the feature from the last block.
        """
        super().__init__()
        self.pretrain_use_cls_token = pretrain_use_cls_token
        self.use_act_checkpoint = use_act_checkpoint
        self.patch_embed = PatchEmbed(
            kernel_size=(patch_size, patch_size),
            stride=(patch_size, patch_size),
            in_chans=in_chans,
            embed_dim=embed_dim,
        )
        if use_abs_pos:
            # Initialize absolute positional embedding with pretrain image size.
            # The embedding is later resized by get_abs_pos() to the runtime grid.
            num_patches = (pretrain_img_size // patch_size) * (pretrain_img_size // patch_size)
            num_positions = (num_patches + 1) if pretrain_use_cls_token else num_patches
            self.pos_embed = nn.Parameter(torch.zeros(1, num_positions, embed_dim))
        else:
            self.pos_embed = None
        # stochastic depth decay rule: drop-path rate grows linearly with depth
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
        self.blocks = nn.ModuleList()
        for i in range(depth):
            block = Block(
                dim=embed_dim,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                drop_path=dpr[i],
                norm_layer=norm_layer,
                act_layer=act_layer,
                use_rel_pos=use_rel_pos,
                rel_pos_zero_init=rel_pos_zero_init,
                # window_size == 0 means this block uses global attention
                window_size=window_size if i in window_block_indexes else 0,
                use_residual_block=i in residual_block_indexes,
                input_size=(img_size // patch_size, img_size // patch_size),
            )
            self.blocks.append(block)
        self._out_feature_channels = {out_feature: embed_dim}
        self._out_feature_strides = {out_feature: patch_size}
        self._out_features = [out_feature]
        if self.pos_embed is not None:
            trunc_normal_(self.pos_embed, std=0.02)
        self.apply(self._init_weights)
    def _init_weights(self, m):
        # ViT-style init: truncated-normal linear weights, zero biases, unit LayerNorm scale.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    def forward(self, x):
        # PatchEmbed produces a (B, H, W, C) feature map.
        x = self.patch_embed(x)
        if self.pos_embed is not None:
            x = x + get_abs_pos(
                self.pos_embed, self.pretrain_use_cls_token, (x.shape[1], x.shape[2])
            )
        for blk in self.blocks:
            if self.use_act_checkpoint:
                # Activation checkpointing: recompute the block in backward to save memory.
                x = checkpoint.checkpoint(blk, x)
            else:
                x = blk(x)
        # (B, H, W, C) -> (B, C, H, W) for downstream conv/FPN consumers.
        return x.permute(0, 3, 1, 2)
class ViT_FPN(Backbone):
    """Simple feature pyramid on top of a single-scale ViT feature map.

    Builds stride-8 (transposed conv), stride-16 (identity scale) and
    stride-32 (max-pool) maps from the ViT output, then lets ``top_block``
    append the two coarsest levels (p6, p7).
    """
    def __init__(self, bottom_up=None, top_block=None, out_channels=None, strides=None, vit_out_dim=None):
        super(ViT_FPN, self).__init__()
        assert isinstance(bottom_up, Backbone)
        self.bottom_up = bottom_up
        self.top_block = top_block
        # Name levels "p3".."p7" by log2 of their stride.
        self._out_feature_strides = {"p{}".format(int(math.log2(s))): s for s in strides}
        self._out_features = list(self._out_feature_strides.keys())
        self._out_feature_channels = {k: out_channels for k in self._out_features}
        self._size_divisibility = strides[2]
        self.maxpool = nn.MaxPool2d(2, stride=2)
        # 2x upsampling from the ViT's stride-16 map to the stride-8 level.
        self.fpn_stride_16_8 = nn.ConvTranspose2d(vit_out_dim, vit_out_dim, 2, stride=2, bias=False)
        self.fpn_stride8_conv1 = nn.Conv2d(in_channels=vit_out_dim, out_channels=out_channels, kernel_size=1, bias=False)
        self.fpn_stride8_norm1 = nn.LayerNorm(out_channels)
        self.fpn_stride8_conv2 = nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.fpn_stride8_norm2 = nn.LayerNorm(out_channels)
        self.fpn_stride16_conv1 = nn.Conv2d(in_channels=vit_out_dim, out_channels=out_channels, kernel_size=1, bias=False)
        self.fpn_stride16_norm1 = nn.LayerNorm(out_channels)
        self.fpn_stride16_conv2 = nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.fpn_stride16_norm2 = nn.LayerNorm(out_channels)
        self.fpn_stride32_conv1 = nn.Conv2d(in_channels=vit_out_dim, out_channels=out_channels, kernel_size=1, bias=False)
        self.fpn_stride32_norm1 = nn.LayerNorm(out_channels)
        self.fpn_stride32_conv2 = nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.fpn_stride32_norm2 = nn.LayerNorm(out_channels)
    def forward(self, x):
        # The permute pairs convert NCHW -> NHWC because nn.LayerNorm
        # normalizes over the last dimension, then back to NCHW.
        vit_output_featuremap = self.bottom_up(x)
        stride8_feature = self.fpn_stride_16_8(vit_output_featuremap)
        stride8_feature = self.fpn_stride8_norm1(self.fpn_stride8_conv1(stride8_feature)
                                                .permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        stride8_feature = self.fpn_stride8_norm2(self.fpn_stride8_conv2(stride8_feature)
                                                .permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        stride32_feature = self.maxpool(vit_output_featuremap)
        stride32_feature = self.fpn_stride32_norm1(self.fpn_stride32_conv1(stride32_feature)
                                                   .permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        stride32_feature = self.fpn_stride32_norm2(self.fpn_stride32_conv2(stride32_feature)
                                                   .permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        stride16_feature = self.fpn_stride16_norm1(self.fpn_stride16_conv1(vit_output_featuremap).
                                                   permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        stride16_feature = self.fpn_stride16_norm2(self.fpn_stride16_conv2(stride16_feature)
                                                   .permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        results = [stride8_feature, stride16_feature, stride32_feature]
        # top_block appends the p6/p7 levels computed from the stride-32 map.
        results.extend(self.top_block(stride32_feature))
        assert len(self._out_features) == len(results)
        fpn_out = {f: res for f, res in zip(self._out_features, results)}
        return fpn_out
    # NOTE(review): detectron2's Backbone declares size_divisibility as a
    # @property; here it is a plain method — confirm callers invoke it as one.
    def size_divisibility(self):
        return self._size_divisibility
    def output_shape(self):
        return {
            name: ShapeSpec(
                channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
            )
            for name in self._out_features
        }
class LastLevelP6P7_P5(nn.Module):
    """
    This module is used in RetinaNet to generate extra layers, P6 and P7 from
    C5 feature, via two stride-2 3x3 convolutions (ReLU between them).
    """
    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.num_levels = 2
        self.in_feature = "p5"
        self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1)
        self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1)
        for module in [self.p6, self.p7]:
            weight_init.c2_xavier_fill(module)
    def forward(self, c5):
        """Return [p6, p7], each halving the spatial size of its input."""
        p6 = self.p6(c5)
        # BUGFIX: `F` is not imported in this module; use nn.functional instead.
        p7 = self.p7(nn.functional.relu(p6))
        return [p6, p7]
def build_vit_fpn_backbone_huge(cfg, input_shape: ShapeSpec):
    """Build a ViT-Huge (single-scale) backbone with a simple FPN for GRiT."""
    # Every 8th block (7, 15, 23, 31) uses global attention; the rest are windowed.
    window_block_indexes = [i for i in range(32) if i % 8 != 7]
    embed_dim = 1280
    vit_out_dim = embed_dim
    bottom_up = ViT(  # Single-scale ViT backbone
        img_size=1024,
        patch_size=16,
        embed_dim=embed_dim,
        depth=32,
        num_heads=16,
        drop_path_rate=0.5,
        window_size=14,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        window_block_indexes=window_block_indexes,
        residual_block_indexes=[],
        use_act_checkpoint=cfg.USE_ACT_CHECKPOINT,
        use_rel_pos=True,
        out_feature="last_feat",
    )
    out_channels = cfg.MODEL.FPN.OUT_CHANNELS
    assert out_channels in (256, 768, 1024)
    return ViT_FPN(
        bottom_up=bottom_up,
        top_block=LastLevelP6P7_P5(out_channels, out_channels),
        out_channels=out_channels,
        strides=[8, 16, 32, 64, 128],
        vit_out_dim=vit_out_dim,
    )
15,785 | import argparse
import multiprocessing as mp
import os
import time
import cv2
import tqdm
import sys
from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.utils.logger import setup_logger
from centernet.config import add_centernet_config
from ..grit_src.grit.config import add_grit_config
from ..grit_src.grit.predictor import VisualizationDemo
def dense_pred_to_caption_only_name(predictions):
    """Return a comma-separated string of the predicted object descriptions.

    Args:
        predictions: mapping with an "instances" entry whose
            ``pred_object_descriptions.data`` is a list of description strings.

    Returns:
        str: the descriptions joined by ",". Empty string if there are none.
    """
    # Removed dead commented-out code and a no-op `del predictions`.
    object_description = predictions["instances"].pred_object_descriptions.data
    return ",".join(object_description)
15,786 | import argparse
import multiprocessing as mp
import os
import time
import cv2
import tqdm
import sys
from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.utils.logger import setup_logger
from centernet.config import add_centernet_config
from ..grit_src.grit.config import add_grit_config
from ..grit_src.grit.predictor import VisualizationDemo
def dense_pred_to_caption(predictions):
    """Build a caption of "description: [x0, y0, x1, y1]; " entries per box."""
    instances = predictions["instances"]
    boxes = instances.pred_boxes if instances.has("pred_boxes") else None
    descriptions = instances.pred_object_descriptions.data
    parts = []
    for idx in range(len(descriptions)):
        coords = [int(v) for v in boxes[idx].tensor.cpu().detach().numpy()[0]]
        parts.append(f"{descriptions[idx]}: {coords}; ")
    return "".join(parts)
def setup_cfg(args):
    """Create and freeze a detectron2 config for GRiT inference.

    Args:
        args (dict): expects keys "config_file", "opts",
            "confidence_threshold", "test_task" and "device".

    Returns:
        CfgNode: frozen configuration.
    """
    cfg = get_cfg()
    add_centernet_config(cfg)
    add_grit_config(cfg)
    # FIX: the original called merge_from_file twice with the same file;
    # merging is idempotent, so a single call is equivalent and cheaper.
    cfg.merge_from_file(args["config_file"])
    cfg.merge_from_list(args["opts"])
    # Set score_threshold for builtin models
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args["confidence_threshold"]
    cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args["confidence_threshold"]
    if args["test_task"]:
        cfg.MODEL.TEST_TASK = args["test_task"]
    cfg.MODEL.BEAM_SIZE = 1
    cfg.MODEL.ROI_HEADS.SOFT_NMS_ENABLED = False
    cfg.USE_ACT_CHECKPOINT = False
    if args["device"] == "cpu":
        cfg.MODEL.DEVICE = "cpu"
    cfg.freeze()
    return cfg
def get_parser(device):
    """Return the GRiT DenseCap inference arguments for the given device."""
    return {
        "config_file": "iGPT/models/grit_src/configs/GRiT_B_DenseCap_ObjectDet.yaml",
        "device": device,
        "confidence_threshold": 0.5,
        "test_task": "DenseCap",
        "opts": ["MODEL.WEIGHTS", "model_zoo/grit_b_densecap_objectdet.pth"],
    }
def read_image(file_name, format=None):
    """
    Read an image into the given format.
    Will apply rotation and flipping if the image has such exif information.
    Args:
        file_name (str): image file path
        format (str): one of the supported image modes in PIL, or "BGR" or "YUV-BT.601".
    Returns:
        image (np.ndarray):
            an HWC image in the given format, which is 0-255, uint8 for
            supported image modes in PIL or "BGR"; float (0-1 for Y) for YUV-BT.601.
    """
    with PathManager.open(file_name, "rb") as f:
        image = Image.open(f)
        # work around this bug: https://github.com/python-pillow/Pillow/issues/3973
        # (EXIF orientation is applied manually rather than relying on PIL)
        image = _apply_exif_orientation(image)
        return convert_PIL_to_numpy(image, format)
class VisualizationDemo(object):
    """Wraps a detectron2 DefaultPredictor and draws GRiT dense-caption output."""
    def __init__(self, cfg, instance_mode=ColorMode.IMAGE):
        # Visualisation always runs on CPU, regardless of the model device.
        self.cpu_device = torch.device("cpu")
        self.instance_mode = instance_mode
        self.predictor = DefaultPredictor(cfg)

    def run_on_image(self, image,device):
        """Run the predictor on a BGR image.

        NOTE(review): `device` is unused in this body — confirm whether
        callers still need to pass it.

        Returns:
            (predictions, vis_output): raw model output and the rendered
            visualisation.
        """
        predictions = self.predictor(image)
        # Convert image from OpenCV BGR format to Matplotlib RGB format.
        image = image[:, :, ::-1]
        visualizer = Visualizer_GRiT(image, instance_mode=self.instance_mode)
        instances = predictions["instances"].to(self.cpu_device)
        vis_output = visualizer.draw_instance_predictions(predictions=instances)
        return predictions, vis_output
def image_caption_api(image_src, device):
    """Run GRiT dense captioning on an image file and return the caption.

    Args:
        image_src (str): path to the image; falsy values return None.
        device (str): inference device ("cpu" or a CUDA device string).

    Returns:
        str | None: caption of "description: [box]; " entries, or None
        when image_src is falsy.
    """
    args = get_parser(device)
    cfg = setup_cfg(args)
    demo = VisualizationDemo(cfg)
    if image_src:
        img = read_image(image_src, format="BGR")
        # FIX: run_on_image is declared as (image, device); the original call
        # omitted `device`, which raises TypeError with that signature.
        predictions, visualized_output = demo.run_on_image(img, device)
        new_caption = dense_pred_to_caption(predictions)
        return new_caption
15,787 | import argparse
import multiprocessing as mp
import os
import time
import cv2
import tqdm
import sys
from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.utils.logger import setup_logger
from centernet.config import add_centernet_config
from ..grit_src.grit.config import add_grit_config
from ..grit_src.grit.predictor import VisualizationDemo
def setup_cfg(args):
    """Create and freeze a detectron2 config for GRiT inference.

    Args:
        args (dict): expects keys "config_file", "opts",
            "confidence_threshold", "test_task" and "device".

    Returns:
        CfgNode: frozen configuration.
    """
    cfg = get_cfg()
    add_centernet_config(cfg)
    add_grit_config(cfg)
    # FIX: the original called merge_from_file twice with the same file;
    # merging is idempotent, so a single call is equivalent and cheaper.
    cfg.merge_from_file(args["config_file"])
    cfg.merge_from_list(args["opts"])
    # Set score_threshold for builtin models
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args["confidence_threshold"]
    cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args["confidence_threshold"]
    if args["test_task"]:
        cfg.MODEL.TEST_TASK = args["test_task"]
    cfg.MODEL.BEAM_SIZE = 1
    cfg.MODEL.ROI_HEADS.SOFT_NMS_ENABLED = False
    cfg.USE_ACT_CHECKPOINT = False
    if args["device"] == "cpu":
        cfg.MODEL.DEVICE = "cpu"
    cfg.freeze()
    return cfg
def get_parser(device):
    """Assemble the argument dict used to configure GRiT DenseCap inference."""
    weights_opts = ["MODEL.WEIGHTS", "model_zoo/grit_b_densecap_objectdet.pth"]
    arg_dict = dict(
        config_file="iGPT/models/grit_src/configs/GRiT_B_DenseCap_ObjectDet.yaml",
        device=device,
        confidence_threshold=0.5,
        test_task="DenseCap",
        opts=weights_opts,
    )
    return arg_dict
class VisualizationDemo(object):
    """Wraps a detectron2 DefaultPredictor and draws GRiT dense-caption output."""
    def __init__(self, cfg, instance_mode=ColorMode.IMAGE):
        # Visualisation always runs on CPU, regardless of the model device.
        self.cpu_device = torch.device("cpu")
        self.instance_mode = instance_mode
        self.predictor = DefaultPredictor(cfg)

    def run_on_image(self, image,device):
        """Run the predictor on a BGR image.

        NOTE(review): `device` is unused in this body — confirm whether
        callers still need to pass it.

        Returns:
            (predictions, vis_output): raw model output and the rendered
            visualisation.
        """
        predictions = self.predictor(image)
        # Convert image from OpenCV BGR format to Matplotlib RGB format.
        image = image[:, :, ::-1]
        visualizer = Visualizer_GRiT(image, instance_mode=self.instance_mode)
        instances = predictions["instances"].to(self.cpu_device)
        vis_output = visualizer.draw_instance_predictions(predictions=instances)
        return predictions, vis_output
def init_demo(device):
    """Construct a VisualizationDemo configured for the given device."""
    cfg = setup_cfg(get_parser(device))
    return VisualizationDemo(cfg)
15,806 | import contextlib
from unittest import mock
import torch
from detectron2.modeling import poolers
from detectron2.modeling.proposal_generator import rpn
from detectron2.modeling.roi_heads import keypoint_head, mask_head
from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers
from .c10 import (
Caffe2Compatible,
Caffe2FastRCNNOutputsInference,
Caffe2KeypointRCNNInference,
Caffe2MaskRCNNInference,
Caffe2ROIPooler,
Caffe2RPN,
)
def patch(model, target, updater, *args, **kwargs):
class Caffe2KeypointRCNNInference:
def __init__(self, use_heatmap_max_keypoint):
def __call__(self, pred_keypoint_logits, pred_instances):
def mock_keypoint_rcnn_inference(tensor_mode, patched_module, use_heatmap_max_keypoint, check=True):
    """Temporarily replace keypoint_rcnn_inference with its Caffe2 version.

    When `check` is true, asserts the patched function was actually called.
    """
    target = "{}.keypoint_rcnn_inference".format(patched_module)
    replacement = Caffe2KeypointRCNNInference(use_heatmap_max_keypoint)
    with mock.patch(target, side_effect=replacement) as mocked_func:
        yield
    if check:
        assert mocked_func.call_count > 0
15,816 | import collections
import contextlib
import copy
import functools
import logging
import numpy as np
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from unittest import mock
import caffe2.python.utils as putils
import torch
import torch.nn.functional as F
from caffe2.proto import caffe2_pb2
from caffe2.python import core, net_drawer, workspace
from torch.nn.functional import interpolate as interp
def onnx_compatibale_interpolate(
    input, size=None, scale_factor=None, mode="nearest", align_corners=None
):
    """Interpolate with an ONNX/Caffe2-exportable op when possible.

    Only the 4-D, scale_factor-driven case is rerouted to Caffe2 ops;
    everything else falls through to torch.nn.functional.interpolate.
    """
    # NOTE: The input dimensions are interpreted in the form:
    # `mini-batch x channels x [optional depth] x [optional height] x width`.
    if size is None and scale_factor is not None:
        if input.dim() == 4:
            if isinstance(scale_factor, (int, float)):
                height_scale, width_scale = (scale_factor, scale_factor)
            else:
                assert isinstance(scale_factor, (tuple, list))
                assert len(scale_factor) == 2
                height_scale, width_scale = scale_factor
            assert not align_corners, "No matching C2 op for align_corners == True"
            if mode == "nearest":
                # Direct Caffe2 equivalent exists for nearest-neighbor resize.
                return torch.ops._caffe2.ResizeNearest(
                    input, order="NCHW", width_scale=width_scale, height_scale=height_scale
                )
            elif mode == "bilinear":
                logger.warning(
                    "Use F.conv_transpose2d for bilinear interpolate"
                    " because there's no such C2 op, this may cause significant"
                    " slowdown and the boundary pixels won't be as same as"
                    " using F.interpolate due to padding."
                )
                assert height_scale == width_scale
                return BilinearInterpolation(input, up_scale=height_scale)
    # Fallback: dynamic output size cannot be expressed statically in ONNX.
    logger.warning("Output size is not static, it might cause ONNX conversion issue")
    return interp(input, size, scale_factor, mode, align_corners)
def mock_torch_nn_functional_interpolate():
    """Patch F.interpolate with the ONNX-compatible variant during export."""
    if not torch.onnx.is_in_onnx_export():
        # Outside ONNX export the stock implementation is fine.
        yield
        return
    with mock.patch(
        "torch.nn.functional.interpolate", side_effect=onnx_compatibale_interpolate
    ):
        yield
15,856 | import os
from detectron2.data import DatasetCatalog, MetadataCatalog
from .builtin_meta import ADE20K_SEM_SEG_CATEGORIES, _get_builtin_metadata
from .cityscapes import load_cityscapes_instances, load_cityscapes_semantic
from .cityscapes_panoptic import register_all_cityscapes_panoptic
from .coco import load_sem_seg, register_coco_instances
from .coco_panoptic import register_coco_panoptic, register_coco_panoptic_separated
from .lvis import get_lvis_instances_meta, register_lvis_instances
from .pascal_voc import register_pascal_voc
# Registry of predefined dataset splits: maps split name ->
# (image directory, annotation json), both relative to the datasets root.
_PREDEFINED_SPLITS_COCO = {}
# Standard COCO instance-detection splits (2014 and 2017).
_PREDEFINED_SPLITS_COCO["coco"] = {
    "coco_2014_train": ("coco/train2014", "coco/annotations/instances_train2014.json"),
    "coco_2014_val": ("coco/val2014", "coco/annotations/instances_val2014.json"),
    "coco_2014_minival": ("coco/val2014", "coco/annotations/instances_minival2014.json"),
    "coco_2014_valminusminival": (
        "coco/val2014",
        "coco/annotations/instances_valminusminival2014.json",
    ),
    "coco_2017_train": ("coco/train2017", "coco/annotations/instances_train2017.json"),
    "coco_2017_val": ("coco/val2017", "coco/annotations/instances_val2017.json"),
    "coco_2017_test": ("coco/test2017", "coco/annotations/image_info_test2017.json"),
    "coco_2017_test-dev": ("coco/test2017", "coco/annotations/image_info_test-dev2017.json"),
    "coco_2017_val_100": ("coco/val2017", "coco/annotations/instances_val2017_100.json"),
}

# Person keypoint splits (same image directories, keypoint annotations).
_PREDEFINED_SPLITS_COCO["coco_person"] = {
    "keypoints_coco_2014_train": (
        "coco/train2014",
        "coco/annotations/person_keypoints_train2014.json",
    ),
    "keypoints_coco_2014_val": ("coco/val2014", "coco/annotations/person_keypoints_val2014.json"),
    "keypoints_coco_2014_minival": (
        "coco/val2014",
        "coco/annotations/person_keypoints_minival2014.json",
    ),
    "keypoints_coco_2014_valminusminival": (
        "coco/val2014",
        "coco/annotations/person_keypoints_valminusminival2014.json",
    ),
    "keypoints_coco_2017_train": (
        "coco/train2017",
        "coco/annotations/person_keypoints_train2017.json",
    ),
    "keypoints_coco_2017_val": ("coco/val2017", "coco/annotations/person_keypoints_val2017.json"),
    "keypoints_coco_2017_val_100": (
        "coco/val2017",
        "coco/annotations/person_keypoints_val2017_100.json",
    ),
}

# Panoptic splits: split name -> (panoptic PNG dir, panoptic json, stuff dir).
_PREDEFINED_SPLITS_COCO_PANOPTIC = {
    "coco_2017_train_panoptic": (
        # This is the original panoptic annotation directory
        "coco/panoptic_train2017",
        "coco/annotations/panoptic_train2017.json",
        # This directory contains semantic annotations that are
        # converted from panoptic annotations.
        # It is used by PanopticFPN.
        # You can use the script at detectron2/datasets/prepare_panoptic_fpn.py
        # to create these directories.
        "coco/panoptic_stuff_train2017",
    ),
    "coco_2017_val_panoptic": (
        "coco/panoptic_val2017",
        "coco/annotations/panoptic_val2017.json",
        "coco/panoptic_stuff_val2017",
    ),
    "coco_2017_val_100_panoptic": (
        "coco/panoptic_val2017_100",
        "coco/annotations/panoptic_val2017_100.json",
        "coco/panoptic_stuff_val2017_100",
    ),
}
def _get_builtin_metadata(dataset_name):
def register_coco_instances(name, metadata, json_file, image_root):
def register_coco_panoptic(
name, metadata, image_root, panoptic_root, panoptic_json, instances_json=None
):
def register_coco_panoptic_separated(
name, metadata, image_root, panoptic_root, panoptic_json, sem_seg_root, instances_json
):
def register_all_coco(root):
    """Register every predefined COCO instance and panoptic dataset under root."""
    # Instance / keypoint datasets (assumed to live in `./datasets`).
    for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_COCO.items():
        for key, (image_root, json_file) in splits_per_dataset.items():
            json_path = json_file if "://" in json_file else os.path.join(root, json_file)
            register_coco_instances(
                key,
                _get_builtin_metadata(dataset_name),
                json_path,
                os.path.join(root, image_root),
            )

    # Panoptic datasets: both the "separated" flavour (Panoptic FPN) and the
    # "standard" flavour (Panoptic-DeepLab) are registered for each split.
    for prefix, (panoptic_root, panoptic_json, semantic_root) in _PREDEFINED_SPLITS_COCO_PANOPTIC.items():
        prefix_instances = prefix[: -len("_panoptic")]
        instances_meta = MetadataCatalog.get(prefix_instances)
        image_root = instances_meta.image_root
        instances_json = instances_meta.json_file
        register_coco_panoptic_separated(
            prefix,
            _get_builtin_metadata("coco_panoptic_separated"),
            image_root,
            os.path.join(root, panoptic_root),
            os.path.join(root, panoptic_json),
            os.path.join(root, semantic_root),
            instances_json,
        )
        register_coco_panoptic(
            prefix,
            _get_builtin_metadata("coco_panoptic_standard"),
            image_root,
            os.path.join(root, panoptic_root),
            os.path.join(root, panoptic_json),
            instances_json,
        )
15,942 | import logging
import os
from collections import OrderedDict
import torch
from torch.nn.parallel import DistributedDataParallel
import time
import datetime
import json
from fvcore.common.timer import Timer
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer, PeriodicCheckpointer
from detectron2.config import get_cfg
from detectron2.data import (
MetadataCatalog,
build_detection_test_loader,
)
from detectron2.engine import default_argument_parser, default_setup, launch
from detectron2.evaluation import (
COCOEvaluator,
LVISEvaluator,
inference_on_dataset,
print_csv_format,
)
from detectron2.modeling import build_model
from detectron2.solver import build_lr_scheduler, build_optimizer
from detectron2.utils.events import (
CommonMetricPrinter,
EventStorage,
JSONWriter,
TensorboardXWriter,
)
from detectron2.modeling.test_time_augmentation import GeneralizedRCNNWithTTA
from detectron2.data.dataset_mapper import DatasetMapper
from detectron2.data.build import build_detection_train_loader
from centernet.config import add_centernet_config
from centernet.data.custom_build_augmentation import build_custom_augmentation
logger = logging.getLogger("detectron2")
def do_test(cfg, model):
class JSONWriter(EventWriter):
def __init__(self, json_file, window_size=20):
def write(self):
def close(self):
class TensorboardXWriter(EventWriter):
def __init__(self, log_dir: str, window_size: int = 20, **kwargs):
def write(self):
def close(self):
class CommonMetricPrinter(EventWriter):
def __init__(self, max_iter: Optional[int] = None, window_size: int = 20):
def _get_eta(self, storage) -> Optional[str]:
def write(self):
class EventStorage:
def __init__(self, start_iter=0):
def put_image(self, img_name, img_tensor):
def put_scalar(self, name, value, smoothing_hint=True):
def put_scalars(self, *, smoothing_hint=True, **kwargs):
def put_histogram(self, hist_name, hist_tensor, bins=1000):
def history(self, name):
def histories(self):
def latest(self):
def latest_with_smoothing_hint(self, window_size=20):
def smoothing_hints(self):
def step(self):
def iter(self):
def iter(self, val):
def iteration(self):
def __enter__(self):
def __exit__(self, exc_type, exc_val, exc_tb):
def name_scope(self, name):
def clear_images(self):
def clear_histograms(self):
class DatasetMapper:
def __init__(
self,
is_train: bool,
*,
augmentations: List[Union[T.Augmentation, T.Transform]],
image_format: str,
use_instance_mask: bool = False,
use_keypoint: bool = False,
instance_mask_format: str = "polygon",
keypoint_hflip_indices: Optional[np.ndarray] = None,
precomputed_proposal_topk: Optional[int] = None,
recompute_boxes: bool = False,
):
def from_config(cls, cfg, is_train: bool = True):
def _transform_annotations(self, dataset_dict, transforms, image_shape):
def __call__(self, dataset_dict):
def build_detection_train_loader(
dataset,
*,
mapper,
sampler=None,
total_batch_size,
aspect_ratio_grouping=True,
num_workers=0,
collate_fn=None,
):
def build_custom_augmentation(cfg, is_train):
def build_custom_train_loader(cfg, mapper=None):
def do_train(cfg, model, resume=False):
    """Run the main training loop for `model` under config `cfg`.

    Builds optimizer/scheduler/checkpointer, optionally resumes from the last
    checkpoint, then iterates the train loader up to max_iter, logging
    metrics, periodically evaluating (do_test) and checkpointing.
    """
    model.train()
    optimizer = build_optimizer(cfg, model)
    scheduler = build_lr_scheduler(cfg, optimizer)
    checkpointer = DetectionCheckpointer(
        model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler
    )
    # Resume picks up after the checkpoint's stored iteration; a fresh run
    # (no "iteration" key) starts at 0.
    start_iter = (
        checkpointer.resume_or_load(
            cfg.MODEL.WEIGHTS, resume=resume,
        ).get("iteration", -1) + 1
    )
    if cfg.SOLVER.RESET_ITER:
        logger.info('Reset loaded iteration. Start training from iteration 0.')
        start_iter = 0
    # TRAIN_ITER < 0 means "use the solver's MAX_ITER".
    max_iter = cfg.SOLVER.MAX_ITER if cfg.SOLVER.TRAIN_ITER < 0 else cfg.SOLVER.TRAIN_ITER
    periodic_checkpointer = PeriodicCheckpointer(
        checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter
    )
    # Metric writers only on the main process to avoid duplicate logs.
    writers = (
        [
            CommonMetricPrinter(max_iter),
            JSONWriter(os.path.join(cfg.OUTPUT_DIR, "metrics.json")),
            TensorboardXWriter(cfg.OUTPUT_DIR),
        ]
        if comm.is_main_process()
        else []
    )
    # Custom augmentation pipeline is opt-in via INPUT.CUSTOM_AUG.
    mapper = DatasetMapper(cfg, True) if cfg.INPUT.CUSTOM_AUG == '' else \
        DatasetMapper(cfg, True, augmentations=build_custom_augmentation(cfg, True))
    if cfg.DATALOADER.SAMPLER_TRAIN in ['TrainingSampler', 'RepeatFactorTrainingSampler']:
        data_loader = build_detection_train_loader(cfg, mapper=mapper)
    else:
        # Non-standard samplers use the CenterNet custom loader.
        from centernet.data.custom_dataset_dataloader import build_custom_train_loader
        data_loader = build_custom_train_loader(cfg, mapper=mapper)
    logger.info("Starting training from iteration {}".format(start_iter))
    with EventStorage(start_iter) as storage:
        step_timer = Timer()
        data_timer = Timer()
        start_time = time.perf_counter()
        for data, iteration in zip(data_loader, range(start_iter, max_iter)):
            data_time = data_timer.seconds()
            storage.put_scalars(data_time=data_time)
            step_timer.reset()
            iteration = iteration + 1
            storage.step()
            loss_dict = model(data)
            losses = sum(
                loss for k, loss in loss_dict.items())
            assert torch.isfinite(losses).all(), loss_dict
            # Reduce losses across workers for logging only; the backward
            # pass uses the local (unreduced) losses.
            loss_dict_reduced = {k: v.item() \
                for k, v in comm.reduce_dict(loss_dict).items()}
            losses_reduced = sum(loss for loss in loss_dict_reduced.values())
            if comm.is_main_process():
                storage.put_scalars(
                    total_loss=losses_reduced, **loss_dict_reduced)
            optimizer.zero_grad()
            losses.backward()
            optimizer.step()
            storage.put_scalar(
                "lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)
            step_time = step_timer.seconds()
            storage.put_scalars(time=step_time)
            data_timer.reset()
            scheduler.step()
            # Periodic evaluation (skipped on the final iteration, which is
            # typically evaluated by the caller).
            if (
                cfg.TEST.EVAL_PERIOD > 0
                and iteration % cfg.TEST.EVAL_PERIOD == 0
                and iteration != max_iter
            ):
                do_test(cfg, model)
                comm.synchronize()
            # Write metrics every 20 iterations after a short warm-up.
            if iteration - start_iter > 5 and \
                (iteration % 20 == 0 or iteration == max_iter):
                for writer in writers:
                    writer.write()
            periodic_checkpointer.step(iteration)
        total_time = time.perf_counter() - start_time
        logger.info(
            "Total training time: {}".format(
                str(datetime.timedelta(seconds=int(total_time)))))
16,002 | import logging
import os
import time
import weakref
from collections import OrderedDict
from typing import Any, Dict, List
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import build_detection_test_loader, build_detection_train_loader
from detectron2.engine import (
DefaultTrainer,
SimpleTrainer,
default_argument_parser,
default_setup,
default_writers,
hooks,
)
from detectron2.evaluation import print_csv_format
from detectron2.evaluation.testing import flatten_results_dict
from detectron2.modeling import build_model
from detectron2.solver import build_lr_scheduler, build_optimizer
from detectron2.utils.events import EventStorage
from detectron2.utils.logger import setup_logger
import pytorch_lightning as pl
from pytorch_lightning import LightningDataModule, LightningModule
from train_net import build_evaluator
logger = logging.getLogger("detectron2")
class TrainingModule(LightningModule):
def __init__(self, cfg):
def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
def on_load_checkpoint(self, checkpointed_state: Dict[str, Any]) -> None:
def setup(self, stage: str):
def training_step(self, batch, batch_idx):
def training_step_end(self, training_step_outpus):
def training_epoch_end(self, training_step_outputs):
def _process_dataset_evaluation_results(self) -> OrderedDict:
def _reset_dataset_evaluators(self):
def on_validation_epoch_start(self, _outputs):
def validation_epoch_end(self, _outputs):
def validation_step(self, batch, batch_idx: int, dataloader_idx: int = 0) -> None:
def configure_optimizers(self):
class DataModule(LightningDataModule):
def __init__(self, cfg):
def train_dataloader(self):
def val_dataloader(self):
def train(cfg, args):
    """Train (or, with --eval-only, evaluate) using PyTorch Lightning."""
    # Validation cadence: effectively disabled when EVAL_PERIOD <= 0.
    eval_every = cfg.TEST.EVAL_PERIOD if cfg.TEST.EVAL_PERIOD > 0 else 10 ** 8
    trainer_params = {
        # training loop is bounded by max steps, use a large max_epochs to make
        # sure max_steps is met first
        "max_epochs": 10 ** 8,
        "max_steps": cfg.SOLVER.MAX_ITER,
        "val_check_interval": eval_every,
        "num_nodes": args.num_machines,
        "gpus": args.num_gpus,
        "num_sanity_val_steps": 0,
    }
    if cfg.SOLVER.AMP.ENABLED:
        trainer_params["precision"] = 16  # mixed-precision training

    last_checkpoint = os.path.join(cfg.OUTPUT_DIR, "last.ckpt")
    if args.resume:
        # resume training from checkpoint
        trainer_params["resume_from_checkpoint"] = last_checkpoint
        logger.info(f"Resuming training from checkpoint: {last_checkpoint}.")

    trainer = pl.Trainer(**trainer_params)
    logger.info(f"start to train with {args.num_machines} nodes and {args.num_gpus} GPUs")

    module = TrainingModule(cfg)
    data_module = DataModule(cfg)
    if args.eval_only:
        logger.info("Running inference")
        trainer.validate(module, data_module)
    else:
        logger.info("Running training")
        trainer.fit(module, data_module)
16,015 | import itertools
import logging
import psutil
import torch
import tqdm
from fvcore.common.timer import Timer
from torch.nn.parallel import DistributedDataParallel
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import LazyConfig, get_cfg, instantiate
from detectron2.data import (
DatasetFromList,
build_detection_test_loader,
build_detection_train_loader,
)
from detectron2.data.benchmark import DataLoaderBenchmark
from detectron2.engine import AMPTrainer, SimpleTrainer, default_argument_parser, hooks, launch
from detectron2.modeling import build_model
from detectron2.solver import build_optimizer
from detectron2.utils import comm
from detectron2.utils.collect_env import collect_env_info
from detectron2.utils.events import CommonMetricPrinter
from detectron2.utils.logger import setup_logger
logger = logging.getLogger("detectron2")
def setup(args):
def create_data_benchmark(cfg, args):
def benchmark_data_advanced(args):
    """Benchmark the dataloader in detail to locate performance bottlenecks."""
    cfg = setup(args)
    benchmark = create_data_benchmark(cfg, args)
    # Single-process measurements only run on rank 0.
    if comm.get_rank() == 0:
        for probe in (benchmark.benchmark_dataset, benchmark.benchmark_mapper):
            probe(100)
        benchmark.benchmark_workers(100, warmup=10)
        benchmark.benchmark_IPC(100, warmup=10)
    # Distributed measurement is run twice to show warm-cache behaviour.
    if comm.get_world_size() > 1:
        benchmark.benchmark_distributed(100)
        logger.info("Rerun ...")
        benchmark.benchmark_distributed(100)
16,020 | import torch
import os
import sys
import argparse
import importlib
from tensorboardX import SummaryWriter
from data_loader import get_dataloader
from itertools import cycle
from py_utils import write_loss, print_composite, to_float
from probe.latent_plot_utils import get_all_plots
from trainer import Trainer
def parse_args():
    """Parse the command-line options: --name, --batch_size, --config."""
    parser = argparse.ArgumentParser()
    for flag, kind, default in (
        ('--name', str, None),
        ('--batch_size', int, None),
        ('--config', str, 'config'),
    ):
        parser.add_argument(flag, type=kind, default=default)
    return parser.parse_args()
16,021 | import torch
import os
import sys
import argparse
import importlib
import numpy as np
from os.path import join as pjoin
from data_loader import get_dataloader
from latent_plot_utils import get_all_plots, get_demo_plots
from trainer import Trainer
from py_utils import to_float, ensure_dirs
def get_all_codes(cfg, output_path):
    """Extract latent codes for every train/test sample, cached at output_path.

    Returns:
        dict: {"train": vis_dict, "test": vis_dict}; each vis_dict maps code
        names to flattened float arrays and "meta" to per-sample label lists.
    """
    print(output_path)
    if os.path.exists(output_path):
        # Cached run: the dict was saved under the "data" key (see below).
        return np.load(output_path, allow_pickle=True)['data'].item()
    ensure_dirs(os.path.dirname(output_path))
    print("start over")
    # Dataloader (no shuffling so codes stay aligned with metadata order)
    train_loader = get_dataloader(cfg, 'train', shuffle=False)
    test_loader = get_dataloader(cfg, 'test', shuffle=False)
    # Trainer
    trainer = Trainer(cfg)
    trainer.to(cfg.device)
    trainer.resume()
    with torch.no_grad():
        vis_dicts = {}
        for phase, loader in [['train', train_loader],
                              ['test', test_loader]]:
            vis_dict = None
            for t, data in enumerate(loader):
                vis_codes = trainer.get_latent_codes(data)
                # First batch initializes the per-key lists.
                if vis_dict is None:
                    vis_dict = {}
                    for key, value in vis_codes.items():
                        vis_dict[key] = [value]
                else:
                    for key, value in vis_codes.items():
                        vis_dict[key].append(value)
            for key, value in vis_dict.items():
                # content_code is only kept for the training split.
                if phase == "test" and key == "content_code":
                    continue
                if key == "meta":
                    # Flatten list-of-dicts into dict-of-flat-lists.
                    secondary_keys = value[0].keys()
                    num = len(value)
                    vis_dict[key] = {
                        secondary_key: [to_float(item) for i in range(num) for item in value[i][secondary_key]]
                        for secondary_key in secondary_keys}
                else:
                    # Concatenate batches, move to CPU and flatten per sample.
                    vis_dict[key] = torch.cat(vis_dict[key], 0)
                    vis_dict[key] = vis_dict[key].cpu().numpy()
                    vis_dict[key] = to_float(vis_dict[key].reshape(vis_dict[key].shape[0], -1))
            vis_dicts[phase] = vis_dict
    np.savez_compressed(output_path, data=vis_dicts)
    return vis_dicts
def get_all_plots(data, output_path, writers, iter, summary=True,
                  style_cluster_protocols=('pca'),
                  separate_compute=False):
    """
    data: {"train": dict_train, "test": dict_test}
    dict_train: {"style2d_code": blabla, etc.}
    separate_compute: compute t-SNE for 2D & 3D separately
    """
    ensure_dirs(output_path)

    def fig_title(title):
        # Figures are written into output_path under their title.
        return pjoin(output_path, title)

    def add_fig(fig, title, phase):
        # Optionally mirror the figure into the phase's tensorboard writer.
        if summary:
            writers[phase].add_figure(title, fig, global_step=iter)

    keys = data["train"].keys()
    has2d = "style2d_code" in keys
    has3d = "style3d_code" in keys
    # style codes & adain params
    for suffix in ["_code", "_adain"]:
        codes_raw = []
        titles = []
        phases = []
        data_keys = []
        if has2d: data_keys.append("style2d" + suffix)
        if has3d: data_keys.append("style3d" + suffix)
        for key in data_keys:
            for phase in ["train", "test"]:
                codes_raw.append(data[phase][key])
                titles.append(f'{phase}_{key}')
                phases.append(phase)
        # calc tsne with style2/3d, train/test altogether
        for name, protocol in zip(['pca', 'tsne'], [calc_pca, calc_tsne]):
            if name not in style_cluster_protocols:
                continue
            style_codes = calc_many_blas(codes_raw, protocol)
            # Overlay 2D-train vs 3D-train embeddings in one figure.
            fig = plot2D_overlay([style_codes[0], style_codes[2]],
                                 [data["train"]["meta"]["style"], data["train"]["meta"]["style"]],
                                 [1.0, 0.5],
                                 fig_title(f'joint_embedding_{name}{suffix}'))
            add_fig(fig, f'joint_embedding_{name}{suffix}', "train")
            for i, (code, phase, title) in enumerate(zip(style_codes, phases, titles)):
                if separate_compute:
                    # Recompute the projection per code instead of jointly.
                    code = protocol(codes_raw[i])
                for label_type in ["style", "content"]:
                    fig = plot2D(code, data[phase]["meta"][label_type], fig_title(f'{title}_{name}_{label_type}'))
                    add_fig(fig, f'{title}_{name}_{label_type}', phase)
    # content codes (train only)
    content_code_pca = calc_pca(data["train"]["content_code"])
    for label in ["style", "content", "phase"]:
        if label == "phase":
            # Only walking samples have meaningful gait-phase labels.
            indices = [i for i in range(len(data["train"]["meta"]["content"])) if data["train"]["meta"]["content"][i] == "walk"]
            walk_code = content_code_pca[np.array(indices)]
            phase_labels = [data["train"]["meta"]["phase"][i] for i in indices]
            fig = plot2D_phase(walk_code, phase_labels, fig_title(f'content_by_{label}'))
        else:
            fig = plot2D(content_code_pca, data["train"]["meta"][label], fig_title(f'content_by_{label}'))
        add_fig(fig, f'content_by_{label}', "train")
    """
    fig = show_images_from_disk("", all_titles, 2, output_path + "all_codes")
    if summary:
        writers["train"].add_figure("all codes", fig, global_step=iter)
    """
def plot_all(cfg):
    """Compute latent codes for all splits and emit t-SNE probe plots."""
    out_dir = pjoin(cfg.main_dir, 'test_probe')
    codes = get_all_codes(cfg, pjoin(out_dir, 'output_codes.npz'))
    get_all_plots(codes, out_dir, {}, 0, summary=False,
                  style_cluster_protocols=('tsne'),
                  separate_compute=True)
16,022 | import torch
import os
import sys
import argparse
import importlib
import numpy as np
from os.path import join as pjoin
BASEPATH = os.path.dirname(__file__)
from data_loader import get_dataloader
from latent_plot_utils import get_all_plots, get_demo_plots
from trainer import Trainer
from py_utils import to_float, ensure_dirs
def get_all_codes(cfg, output_path):
    """Extract latent codes for every train/test sample, cached at output_path.

    Returns:
        dict: {"train": vis_dict, "test": vis_dict}; each vis_dict maps code
        names to flattened float arrays and "meta" to per-sample label lists.
    """
    print(output_path)
    if os.path.exists(output_path):
        # Cached run: the dict was saved under the "data" key (see below).
        return np.load(output_path, allow_pickle=True)['data'].item()
    ensure_dirs(os.path.dirname(output_path))
    print("start over")
    # Dataloader (no shuffling so codes stay aligned with metadata order)
    train_loader = get_dataloader(cfg, 'train', shuffle=False)
    test_loader = get_dataloader(cfg, 'test', shuffle=False)
    # Trainer
    trainer = Trainer(cfg)
    trainer.to(cfg.device)
    trainer.resume()
    with torch.no_grad():
        vis_dicts = {}
        for phase, loader in [['train', train_loader],
                              ['test', test_loader]]:
            vis_dict = None
            for t, data in enumerate(loader):
                vis_codes = trainer.get_latent_codes(data)
                # First batch initializes the per-key lists.
                if vis_dict is None:
                    vis_dict = {}
                    for key, value in vis_codes.items():
                        vis_dict[key] = [value]
                else:
                    for key, value in vis_codes.items():
                        vis_dict[key].append(value)
            for key, value in vis_dict.items():
                # content_code is only kept for the training split.
                if phase == "test" and key == "content_code":
                    continue
                if key == "meta":
                    # Flatten list-of-dicts into dict-of-flat-lists.
                    secondary_keys = value[0].keys()
                    num = len(value)
                    vis_dict[key] = {
                        secondary_key: [to_float(item) for i in range(num) for item in value[i][secondary_key]]
                        for secondary_key in secondary_keys}
                else:
                    # Concatenate batches, move to CPU and flatten per sample.
                    vis_dict[key] = torch.cat(vis_dict[key], 0)
                    vis_dict[key] = vis_dict[key].cpu().numpy()
                    vis_dict[key] = to_float(vis_dict[key].reshape(vis_dict[key].shape[0], -1))
            vis_dicts[phase] = vis_dict
    np.savez_compressed(output_path, data=vis_dicts)
    return vis_dicts
def get_demo_plots(data, output_path):
    """
    data: {"train": dict_train, "test": dict_test}
    dict_train: {"style2d_code": blabla, etc.}
    """
    ensure_dirs(output_path)

    def fig_title(title):
        # Figures are written into output_path under their title.
        return pjoin(output_path, title)

    style_labels = data["train"]["meta"]["style"]
    # Joint 2D/3D AdaIN embedding: t-SNE over train & test codes together.
    adain_raw = []
    for key in ["style2d_adain", "style3d_adain"]:
        for phase in ["train", "test"]:
            adain_raw.append(data[phase][key])
    adain_tsne = calc_many_blas(adain_raw, calc_tsne)
    # Overlay 2D-train vs 3D-train AdaIN embeddings in one figure.
    plot2D_overlay([adain_tsne[0], adain_tsne[2]],
                   [style_labels, style_labels],
                   [1.0, 0.5],
                   fig_title(f'joint_embedding_adain_tsne'))
    # Per-code t-SNE for the 3D style code and its AdaIN parameters.
    for key in ["style3d_code", "style3d_adain"]:
        tsne_code = calc_tsne(data["train"][key])
        plot2D(tsne_code, style_labels, fig_title(f'{key}_tsne'))
    # Content code PCA, colored by walking phase and by style.
    content_code_pca = calc_pca(data["train"]["content_code"])
    indices = [i for i in range(len(data["train"]["meta"]["content"])) if data["train"]["meta"]["content"][i] == "walk"]
    walk_code = content_code_pca[np.array(indices)]
    phase_labels = [data["train"]["meta"]["phase"][i] for i in indices]
    plot2D_phase(walk_code, phase_labels, fig_title(f'content_by_phase'))
    plot2D(content_code_pca, style_labels, fig_title(f'content_by_style'))
def plot_demo(cfg):
    """Generate the demo figures under demo_results/figures."""
    repo_root = pjoin(os.path.dirname(__file__), '..')
    out_dir = pjoin(repo_root, "demo_results", "figures")
    codes = get_all_codes(cfg, pjoin(out_dir, 'output_codes.npz'))
    get_demo_plots(codes, out_dir)
16,023 | import torch
import os
import sys
import argparse
import importlib
import numpy as np
from os.path import join as pjoin
from data_loader import get_dataloader
from latent_plot_utils import get_all_plots, get_demo_plots
from trainer import Trainer
from py_utils import to_float, ensure_dirs
def parse_args():
    """Return the parsed CLI namespace (--name, --batch_size, --config)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--name', type=str)
    parser.add_argument('--batch_size', type=int)
    parser.add_argument('--config', type=str, default='config')
    args = parser.parse_args()
    return args
16,024 | import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.patheffects as pe
from matplotlib import cm
import torch
import argparse
import sys
from os.path import join as pjoin
from py_utils import to_float
def init_2d_plot(fig, subplot_pos, scale):
    """Add a square 2D axes spanning +/-40*scale with all ticks hidden."""
    ax = fig.add_subplot(subplot_pos)
    half_extent = scale * 40
    ax.set_xlim(-half_extent, half_extent)
    ax.set_ylim(-half_extent, half_extent)
    # Hide tick marks and labels on both axes.
    ax.set_xticks([], [])
    ax.set_yticks([], [])
    return ax
16,025 | import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.patheffects as pe
from matplotlib import cm
import torch
import argparse
import sys
from os.path import join as pjoin
from py_utils import to_float
def init_3d_plot(fig, subplot_pos, scale):
    """Add and style a 3D axes: symmetric limits of +/- 20*scale, a
    checkerboard "floor" surface placed at the bottom, hidden panes,
    no ticks, and a fixed viewing angle. Returns the configured axes."""
    ax = fig.add_subplot(subplot_pos, projection='3d')  # This projection type determines the #axes
    rscale = scale * 20  # 15
    ax.set_xlim3d(-rscale, rscale)
    ax.set_zlim3d(-rscale, rscale)
    ax.set_ylim3d(-rscale, rscale)
    # Near-white face colour and a light grey line colour, given as RGB in 0..255.
    facec = (254, 254, 254)
    linec = (240, 240, 240)
    facec = list(np.array(facec) / 256.0) + [1.0]
    linec = list(np.array(linec) / 256.0) + [1.0]
    ax.w_zaxis.set_pane_color(facec)
    ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))  # fully transparent side panes
    ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
    # Build a checkerboard ground plane out of a coloured surface plot.
    X = np.arange(-20, 25, 5)
    Y = np.arange(-20, 25, 5)
    xlen = len(X)
    ylen = len(Y)
    X, Y = np.meshgrid(X, Y)
    Z = np.zeros(X.shape) - rscale  # place it at a lower surface
    colortuple = (facec, linec)
    colors = np.zeros((Z.shape + (4, )))
    for y in range(ylen):
        for x in range(xlen):
            # alternate the two colours to get the checker pattern
            colors[y, x] = colortuple[(x + y) % len(colortuple)]
    # Plot the surface with face colors taken from the array we made.
    surf = ax.plot_surface(X, Y, Z, facecolors=colors, linewidth=0., zorder=-1, shade=False)
    # Hide/downplay the axis spine lines.
    ax.w_zaxis.line.set_lw(0.)
    ax.w_yaxis.line.set_lw(0.)
    ax.w_yaxis.line.set_color(linec)
    ax.w_xaxis.line.set_lw(0.)
    ax.w_xaxis.line.set_color(linec)
    ax.set_xticks([], [])
    ax.set_yticks([], [])
    ax.set_zticks([], [])
    ax.view_init(20, -60)  # -40 for the other direction
    return ax
16,026 | import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.patheffects as pe
from matplotlib import cm
import torch
import argparse
import sys
from os.path import join as pjoin
from py_utils import to_float
# Eight evenly spaced samples from the colormap, one colour per limb group.
limb_colors = [cmap(x) for x in np.arange(0, 1, 0.125)]


def init_lines(ax, anim, dim, color=limb_colors[0], scale=1.0):
    """Create one black-outlined line artist per joint of *anim*, all starting
    at the origin; used as bone segments for the skeleton animation."""
    start = [[0, 0] for _ in range(dim)]
    artists = []
    for _ in range(anim.shape[1]):
        line, = ax.plot(*start, color=color, zorder=3,
                        linewidth=2 * scale, solid_capstyle='round',
                        path_effects=[pe.Stroke(linewidth=3 * scale, foreground='black'),
                                      pe.Normal()])
        artists.append(line)
    return artists
16,027 | import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.patheffects as pe
from matplotlib import cm
import torch
import argparse
import sys
from os.path import join as pjoin
from py_utils import to_float
# Marker size per joint: uniform, except the head which is drawn larger.
joint_sizes = [3 for i in range(J)]
joint_sizes[head_index] = 7


def init_dots(ax, anim, dim, color='white', scale=1.0):
    """Create one circular marker artist per joint of *anim*, all starting
    at the origin; marker sizes come from the module-level joint_sizes."""
    origin = [[0] for _ in range(dim)]
    dots = []
    for j in range(anim.shape[1]):
        dot, = ax.plot(*origin, color=color, zorder=3,
                       linewidth=2, linestyle='',
                       marker="o", markersize=joint_sizes[j] * scale,
                       path_effects=[pe.Stroke(linewidth=1.5 * scale, foreground='black'),
                                     pe.Normal()])
        dots.append(dot)
    return dots
16,028 | import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.patheffects as pe
from matplotlib import cm
import torch
import argparse
import sys
from os.path import join as pjoin
from py_utils import to_float
# Parent joint index for each of the 21 joints; -1 marks the root.
parents = np.array([-1, 0, 1, 2, 3, 0, 5, 6, 7, 0, 9, 10, 11, 10, 13, 14, 15, 10, 17, 18, 19])


def _anim_skel(lines, dots, anim, dim, i):
    """Move the per-joint line/dot artists to show frame ``i`` of a skeleton.

    Args:
        lines: per-joint bone-segment artists (from init_lines).
        dots: per-joint marker artists (from init_dots).
        anim: joint positions; assumed shape [T, J, dim] — TODO confirm.
        dim: 2 or 3, selects the plain-2D vs 3D update path.
        i: frame index; clamped to the last frame so the animation may be
           driven past the end of the clip without indexing errors.

    Returns:
        [lines, dots], matching the FuncAnimation callback convention.
    """
    i = min(i, len(anim) - 1)  # clamp frame index
    if dim == 3:
        for j in range(len(parents)):
            if parents[j] != -1:
                # NOTE(review): the data's z axis is negated and drawn on the
                # plot's y axis, with data-y on the plot's z axis — this maps
                # the motion coordinate frame onto matplotlib 3D; confirm.
                lines[j].set_data(
                    [ anim[i, j, 0], anim[i, parents[j], 0]],
                    [-anim[i, j, 2], -anim[i, parents[j], 2]])
                lines[j].set_3d_properties(
                    [ anim[i, j, 1], anim[i, parents[j], 1]])
            dots[j].set_data([anim[i, j, 0]], [-anim[i, j, 2]])
            dots[j].set_3d_properties([anim[i, j, 1]])
    else:
        for j in range(len(parents)):
            if parents[j] != -1:
                lines[j].set_data(
                    [anim[i, j, 0], anim[i, parents[j], 0]],
                    [anim[i, j, 1], anim[i, parents[j], 1]])
            dots[j].set_data([anim[i, j, 0]], [anim[i, j, 1]])
    return [lines, dots]
16,029 | import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.patheffects as pe
from matplotlib import cm
import torch
import argparse
import sys
from os.path import join as pjoin
from py_utils import to_float
# Joint indices of the four foot markers (left/right heel and toe).
joint_foot_indices = [3, 4, 7, 8]


def _anim_foot_contact(dots, foot_contact, i):
    """Recolour the foot markers for frame ``i``: red when the contact label
    equals 1.0, blue otherwise. The frame index is clamped to the clip end."""
    frame = min(i, len(foot_contact) - 1)
    for k, joint in enumerate(joint_foot_indices):
        touching = foot_contact[frame, k] == 1.0
        dots[joint].set_color('red' if touching else 'blue')
    return [dots]
16,030 | import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.patheffects as pe
from matplotlib import cm
import torch
import argparse
import sys
from os.path import join as pjoin
from py_utils import to_float
The provided code snippet includes necessary dependencies for implementing the `glb2centered` function. Write a Python function `def glb2centered(glb)` to solve the following problem:
Input: global joint positions `glb` of shape [T, J, 3] (or [T, J, 2]) for a single clip. Output: the same motion translated so that the root's average x (and, in 3D, z) position is zero.
Here is the function:
def glb2centered(glb):
    """Centre a single clip of global joint positions.

    The clip is translated so that the root joint's time-averaged x (and z,
    in 3D) coordinate becomes zero; the vertical (y) offset is untouched.

    Args:
        glb: positions of shape [T, J, 3] or [T, J, 2] — single clip.

    Returns:
        the translated positions (a new array).
    """
    offset = glb[:, 0:1, :].mean(axis=0, keepdims=True)
    offset[0, 0, 1] = 0  # keep the original height
    return glb - offset
16,031 | import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.patheffects as pe
from matplotlib import cm
import torch
import argparse
import sys
from os.path import join as pjoin
from py_utils import to_float
def to_numpy(data):
    """Convert every torch.Tensor in *data* to a NumPy array.

    Non-tensor items are passed through unchanged.

    Args:
        data: an iterable of tensors and/or arbitrary objects.

    Returns:
        a list with each tensor replaced by a detached NumPy array.
    """
    output = []
    for d in data:
        if isinstance(d, torch.Tensor):
            # .cpu() so tensors living on a CUDA device convert too;
            # it is a no-op for CPU tensors, so CPU behaviour is unchanged.
            output.append(d.detach().cpu().numpy())
        else:
            output.append(d)
    return output
16,032 | import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.patheffects as pe
from matplotlib import cm
import torch
import argparse
import sys
from os.path import join as pjoin
from py_utils import to_float
def parse_args():
    """Parse command-line options: --file is the saved output to inspect."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--file', type=str)
    parsed = parser.parse_args()
    return parsed
16,033 | import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.patheffects as pe
from matplotlib import cm
import torch
import argparse
import sys
from os.path import join as pjoin
from py_utils import to_float
def load_output(filename):
    """Load a torch checkpoint onto the CPU, print its top-level keys,
    and return the loaded object."""
    payload = torch.load(filename, map_location='cpu')
    print(list(payload.keys()))
    return payload
16,034 | import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import torch
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from matplotlib import cm
from matplotlib.legend_handler import HandlerLine2D, HandlerTuple
import tikzplotlib
from os.path import join as pjoin
from py_utils import ensure_dirs
def calc_pca_curve(raw):
    """Plot the cumulative explained-variance curve of a PCA fit on *raw*.

    Fits a full PCA, plots the cumulative explained-variance fraction of
    the first five components, highlights the 2-component point, and
    saves the figure as pca_curve.png / pca_curve.tex (in the CWD).

    Returns:
        the per-component explained-variance ratios.
    """
    pcan = PCA()
    pcan.fit_transform(raw)  # transform result unused; only the fit matters
    pct = pcan.explained_variance_ratio_
    # Cumulative fraction of total variance explained by the first k components.
    prefix = np.cumsum(pct / np.sum(pct))
    fig = plt.figure(figsize=(4, 4))
    ax = fig.add_axes([0.2, 0.2, 0.6, 0.6])
    ax.plot(list(range(1, 6)), prefix[:5])
    ax.plot(2, prefix[1], "ro")  # mark the 2-component point
    ax.annotate("{:.3f}% of variation".format(prefix[1] * 100),
                (2, prefix[1]),
                textcoords="offset points",
                xytext=(60, -20),
                ha="center")
    ax.set_xticks(list(range(1, 6)))
    ax.set_yticks(list(np.arange(0.5, 1.01, 0.1)))
    ax.set_xlabel("number of components")
    ax.set_ylabel("explained variance ratio")
    name = "pca_curve"
    tikzplotlib.save(name + ".tex", figure=fig, strict=True)
    plt.savefig("pca_curve.png")
    return pct
16,035 | import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import torch
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from matplotlib import cm
from matplotlib.legend_handler import HandlerLine2D, HandlerTuple
import tikzplotlib
from os.path import join as pjoin
from py_utils import ensure_dirs
def plot2D(data, labels, title):
    """Scatter-plot 2D *data*, one colour per distinct label.

    The data is min-max normalised per axis to [0, 1] first; the figure
    is saved as <title>.png and <title>.tex and returned.
    """
    # min-max normalise each axis to [0, 1]
    x_min, x_max = np.min(data, axis=0), np.max(data, axis=0)
    data = (data - x_min) / (x_max - x_min)
    fig, ax = plt.subplots(figsize=(8, 8))
    cjet = cm.get_cmap("jet")
    num_labels, distinct_labels, indices = distinct_labels_and_indices(labels)
    for i, label in enumerate(distinct_labels):
        index = indices[label]
        ax.scatter(data[index, 0], data[index, 1], label=label, c=[cjet(1.0 * i / num_labels)], linewidths=0.)
    ax.legend(loc="center left", bbox_to_anchor=(1, 0, 1, 1),
              title=title.split('/')[-1])
    fig.tight_layout()
    tikzplotlib.save("%s.tex" % title, figure=fig, strict=True)
    plt.savefig("%s.png" % title)
    return fig


def calc_tsne(raw):
    """Embed *raw* in 2D with t-SNE.

    The fitted model is cached in the module-level ``tsne`` variable
    (assumed to be initialised to None elsewhere in the module — TODO confirm).
    """
    global tsne
    if tsne is None:
        tsne = TSNE(n_components=2, init='pca', random_state=7)  # n_iter = xxx
    result = tsne.fit_transform(raw)
    return result


def plot_tsne(raw, labels, title):
    """Convenience wrapper: t-SNE embed *raw*, then scatter-plot by *labels*."""
    result = calc_tsne(raw)
    return plot2D(result, labels, title)
16,036 | import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import torch
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from matplotlib import cm
from matplotlib.legend_handler import HandlerLine2D, HandlerTuple
import tikzplotlib
from os.path import join as pjoin
from py_utils import ensure_dirs
def plot2D(data, labels, title):
    """Scatter-plot 2D *data*, one colour per distinct label; data is
    min-max normalised per axis and the figure saved as <title>.png/.tex."""
    x_min, x_max = np.min(data, axis=0), np.max(data, axis=0)
    data = (data - x_min) / (x_max - x_min)
    fig, ax = plt.subplots(figsize=(8, 8))
    cjet = cm.get_cmap("jet")
    num_labels, distinct_labels, indices = distinct_labels_and_indices(labels)
    for i, label in enumerate(distinct_labels):
        index = indices[label]
        ax.scatter(data[index, 0], data[index, 1], label=label, c=[cjet(1.0 * i / num_labels)], linewidths=0.)
    ax.legend(loc="center left", bbox_to_anchor=(1, 0, 1, 1),
              title=title.split('/')[-1])
    fig.tight_layout()
    tikzplotlib.save("%s.tex" % title, figure=fig, strict=True)
    plt.savefig("%s.png" % title)
    return fig


def calc_tsne(raw):
    """2D t-SNE embedding; the model is cached in the module-level ``tsne``
    (assumed initialised to None elsewhere — TODO confirm)."""
    global tsne
    if tsne is None:
        tsne = TSNE(n_components=2, init='pca', random_state=7)  # n_iter = xxx
    result = tsne.fit_transform(raw)
    return result


def plot_content_tsne(raw, slabels, clabels, title):
    """t-SNE embed *raw* (result cached on disk as <title>_tsne.npz) and
    plot it twice: coloured by style labels and by content labels."""
    name = title + "_tsne"
    path = name + ".npz"
    if os.path.exists(path):
        print("%s already exists" % path)
        result = np.load(path, allow_pickle=True)["result"]
    else:
        print("start to produce %s" % path)
        result = calc_tsne(raw)
        np.savez_compressed(name, result=result)
    plot2D(result, slabels, title + "_style_labels")
    plot2D(result, clabels, title + "_content_labels")
16,037 | import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import torch
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from matplotlib import cm
from matplotlib.legend_handler import HandlerLine2D, HandlerTuple
import tikzplotlib
from os.path import join as pjoin
from py_utils import ensure_dirs
def show_images(images, titles, this_title, rows=1):
    """Display a list of images in a single figure with matplotlib.

    Parameters
    ---------
    images: List of np.arrays compatible with plt.imshow.
    titles: List of titles corresponding to each image. Must have
        the same length as images.
    this_title: output path prefix; the figure is saved as "<this_title>.png".
    rows (Default = 1): Number of rows in the grid; columns are derived.

    Returns the matplotlib Figure.
    """
    assert (len(images) == len(titles))
    n_images = len(images)
    # int(): add_subplot requires integer grid dimensions.
    cols = int(np.ceil(n_images / float(rows)))
    # BUG FIX: `np.array(rows, cols)` passed `cols` as the dtype argument
    # (a TypeError at runtime); the intended size is the (rows, cols) pair.
    size = np.array((8, 8)) * np.array((rows, cols))
    fig = plt.figure(figsize=size)
    for n, (image, title) in enumerate(zip(images, titles)):
        a = fig.add_subplot(rows, cols, n + 1)
        if image.ndim == 2:
            plt.gray()  # greyscale colormap for single-channel images
        a.set_axis_off()
        plt.imshow(image)
        a.set_title(title)
    fig.tight_layout(pad=0, w_pad=0, h_pad=0)
    plt.subplots_adjust(wspace=0, hspace=0)
    plt.savefig("%s.png" % this_title, dpi=150, bbox_inches='tight', pad_inches=0)
    return fig
def show_images_from_disk(path, titles, rows, this_title):
    """Read "<title>.png" for every title under *path* and arrange them in a
    single grid figure saved under *path* as well."""
    loaded = [plt.imread(os.path.join(path, "%s.png" % title)) for title in titles]
    output_title = os.path.join(path, this_title)
    return show_images(loaded, titles, output_title, rows)
16,038 | import os
import math
import torch
import torch.nn as nn
import torch.nn.init as init
from torch.optim import lr_scheduler
from model import Model
from py_utils import update_dict
def get_model_list(dirname, key):
    """Return the lexicographically last checkpoint path in *dirname* whose
    filename contains both *key* and ".pt"; None when the directory does
    not exist or no file matches."""
    if not os.path.exists(dirname):
        return None
    candidates = sorted(
        os.path.join(dirname, entry)
        for entry in os.listdir(dirname)
        if os.path.isfile(os.path.join(dirname, entry))
        and key in entry and ".pt" in entry
    )
    if not candidates:
        return None
    return candidates[-1]
16,039 | import os
import math
import torch
import torch.nn as nn
import torch.nn.init as init
from torch.optim import lr_scheduler
from model import Model
from py_utils import update_dict
def get_scheduler(optimizer, config, it=-1):
    """Build the learning-rate scheduler described by ``config.lr_policy``.

    Args:
        optimizer: the optimizer to attach the scheduler to.
        config: must define ``lr_policy`` (None / 'constant' / 'step'); for
            'step' it must also define ``step_size`` and ``step_gamma``.
        it: last finished iteration, forwarded as ``last_epoch`` (-1 = fresh).

    Returns:
        a torch scheduler, or None for a constant learning rate.

    Raises:
        NotImplementedError: for any unrecognized policy.
    """
    lr_policy = config.lr_policy
    if lr_policy is None or lr_policy == 'constant':
        scheduler = None  # constant scheduler
    elif lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=config.step_size,
                                        gamma=config.step_gamma, last_epoch=it)
    else:
        # BUG FIX: previously this *returned* the exception object instead of
        # raising it, and passed the policy as a second constructor argument.
        raise NotImplementedError('%s not implemented' % lr_policy)
    return scheduler
16,040 | import os
import math
import torch
import torch.nn as nn
import torch.nn.init as init
from torch.optim import lr_scheduler
from model import Model
from py_utils import update_dict
def weights_init(init_type='gaussian'):
    """Return a module initialiser for use with ``nn.Module.apply``.

    Conv*/Linear weights are initialised according to *init_type*
    ('gaussian' | 'xavier' | 'kaiming' | 'orthogonal' | 'default'),
    and their biases are zeroed. Other module types are left untouched.
    """
    def init_fun(m):
        cls_name = m.__class__.__name__
        is_target = ((cls_name.find('Conv') == 0 or cls_name.find('Linear') == 0)
                     and hasattr(m, 'weight'))
        if not is_target:
            return
        if init_type == 'gaussian':
            init.normal_(m.weight.data, 0.0, 0.02)
        elif init_type == 'xavier':
            init.xavier_normal_(m.weight.data, gain=math.sqrt(2))
        elif init_type == 'kaiming':
            init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
        elif init_type == 'orthogonal':
            init.orthogonal_(m.weight.data, gain=math.sqrt(2))
        elif init_type == 'default':
            pass  # keep the framework's default weight initialisation
        else:
            assert 0, "Unsupported initialization: {}".format(init_type)
        if hasattr(m, 'bias') and m.bias is not None:
            init.constant_(m.bias.data, 0.0)
    return init_fun
16,041 | import os
import sys
import random
import torch
import numpy as np
import argparse
BASEPATH = os.path.dirname(__file__)
from os.path import join as pjoin
from torch.utils.data import Dataset, DataLoader
from utils.animation_data import AnimationData
from utils.animation_2d_data import AnimationData2D
from utils.load_skeleton import Skel
from config import Config
from py_utils import print_composite
def normalize_motion(motion, mean_pose, std_pose):
def single_to_batch(data):
class AnimationData2D:
def __init__(self, projection):
def get_style2d(self):
def get_projection(self):
def from_style2d(cls, style2d):
def from_openpose_json(cls, json_dir, scale=0.07, smooth=True):
def process_single_json(json_dir, config, norm_data_path=pjoin(BASEPATH, 'data/treadmill_norm/test2d.npz'), scale=0.07, smooth=True, to_batch=False):
    """Load an OpenPose JSON sequence and build the normalised 2D style input.

    Args:
        json_dir: directory of per-frame OpenPose JSON files; its basename
            is recorded as the "content" meta field.
        config: provides ``device`` for tensor placement.
        norm_data_path: .npz with 'mean'/'std' normalisation statistics.
        scale, smooth: forwarded to the OpenPose loader.
        to_batch: wrap the result as a batch of size 1.

    Returns:
        dict with meta info, the raw 2D style motion ("style2draw") and its
        normalised version ("style2d").
    """
    def to_tensor(x):
        # float32, placed on the configured device
        return torch.tensor(x).float().to(config.device)
    anim2d = AnimationData2D.from_openpose_json(json_dir, scale=scale, smooth=smooth)
    style2d = to_tensor(anim2d.get_style2d())
    data = {"meta": {"style": "test", "content": json_dir.split('/')[-1]},
            "style2draw": style2d}
    # Normalise with the training-set statistics stored on disk.
    norm = np.load(norm_data_path, allow_pickle=True)
    data["style2d"] = normalize_motion(style2d,
                                       to_tensor(norm['mean']).unsqueeze(-1),
                                       to_tensor(norm['std']).unsqueeze(-1))
    if to_batch:
        data = single_to_batch(data)
    return data
16,042 | import os
import sys
import random
import torch
import numpy as np
import argparse
from os.path import join as pjoin
from torch.utils.data import Dataset, DataLoader
from utils.animation_data import AnimationData
from utils.animation_2d_data import AnimationData2D
from utils.load_skeleton import Skel
from config import Config
from py_utils import print_composite
def parse_args():
    """Parse command-line options: --name, --batch_size, --config (default 'config')."""
    parser = argparse.ArgumentParser()
    options = [
        ('--name', str, None),
        ('--batch_size', int, None),
        ('--config', str, 'config'),
    ]
    for flag, kind, default in options:
        parser.add_argument(flag, type=kind, default=default)
    return parser.parse_args()
16,043 | import os
import sys
import random
import torch
import numpy as np
import argparse
from os.path import join as pjoin
from torch.utils.data import Dataset, DataLoader
from utils.animation_data import AnimationData
from utils.animation_2d_data import AnimationData2D
from utils.load_skeleton import Skel
from config import Config
from py_utils import print_composite
def process_single_bvh(filename, config, norm_data_dir=None, downsample=4, skel=None, to_batch=False):
    """Load one BVH file and build the network inputs (content + 3D style).

    Args:
        filename: path to the BVH clip; its basename becomes the "content" meta.
        config: provides ``device`` and a fallback ``extra_data_dir`` with the
            stored normalisation statistics.
        norm_data_dir: directory holding train_content.npz / train_style3d.npz;
            defaults to ``config.extra_data_dir``.
        downsample, skel: forwarded to the BVH loader.
        to_batch: wrap the result as a batch of size 1.

    Returns:
        dict with meta, foot contacts, raw and normalised content/style3d tensors.
    """
    def to_tensor(x):
        # float32, placed on the configured device
        return torch.tensor(x).float().to(config.device)
    anim = AnimationData.from_BVH(filename, downsample=downsample, skel=skel, trim_scale=4)
    foot_contact = anim.get_foot_contact(transpose=True)  # [4, T]
    content = to_tensor(anim.get_content_input())
    style3d = to_tensor(anim.get_style3d_input())
    data = {"meta": {"style": "test", "content": filename.split('/')[-1]},
            "foot_contact": to_tensor(foot_contact),
            "contentraw": content,
            "style3draw": style3d
            }
    if norm_data_dir is None:
        norm_data_dir = config.extra_data_dir
    # Normalise each stream with its per-dataset mean/std stored on disk.
    for key, raw in zip(["content", "style3d"], [content, style3d]):
        norm_path = os.path.join(norm_data_dir, f'train_{key}.npz')
        norm = np.load(norm_path, allow_pickle=True)
        data[key] = normalize_motion(raw,
                                     to_tensor(norm['mean']).unsqueeze(-1),
                                     to_tensor(norm['std']).unsqueeze(-1))
    if to_batch:
        data = single_to_batch(data)
    return data
class Config:
    """Central experiment configuration: paths, network hyper-parameters,
    loss weights and training options.

    ``initialize`` must be called once before use: it resolves the
    experiment directories, creates them, and selects the torch device.
    """
    for_try = False  # set to True only if you want to quickly check if all parts (latent space visualization, result output, etc.) function correctly

    # Save & Visualization
    name = 'pretrained'  # name of the experiment, for training from scratch please use a different name
    cuda_id = 0

    # hyyyper params
    use_rotloss = True
    use_newdecoder = True

    # data paths
    data_dir = pjoin(BASEPATH, 'data')
    expr_dir = BASEPATH
    data_filename = "xia.npz"  # change to 'bfa.npz' for training on bfa data
    data_path = pjoin(data_dir, data_filename)
    # e.g. "xia.npz" -> "<data_dir>/xia_norms" for normalisation statistics
    extra_data_dir = pjoin(data_dir, data_filename.split('.')[-2].split('/')[-1] + "_norms")

    # model paths (all filled in by initialize())
    main_dir = None
    model_dir = None
    tb_dir = None
    info_dir = None
    output_dir = None

    vis_freq = 100
    log_freq = 100
    save_freq = 50000
    mt_save_iter = 50000  # How often do you want to save output images during training
    mt_display_iter = 5000  # How often do you want to display output images during training
    mt_batch_n = 1  # number of batches to save in training

    # optimization options
    max_iter = 300000  # maximum number of training iterations
    weight_decay = 0.0001  # weight decay
    lr_gen = 0.0001  # learning rate for the generator
    lr_dis = 0.0001  # learning rate for the discriminator
    weight_init = 'kaiming'  # initialization [gaussian/kaiming/xavier/orthogonal]
    lr_policy = None

    # Training
    batch_size = 128

    # Testing
    test_batch_n = 56  # number of test clips

    # Quick-smoke-test overrides: tiny frequencies / batch so every code path
    # (visualisation, saving, display) runs within a few iterations.
    if for_try:
        vis_freq = 1
        log_freq = 1
        save_freq = 5

        # logger options
        mt_save_iter = 2  # How often do you want to save output images during training
        mt_display_iter = 3  # How often do you want to display output images during training
        mt_batch_n = 1  # number of batches to save in training

        # max_iter = 10  # maximum number of training iterations
        batch_size = 16

    # dataset
    dataset_norm_config = {  # specify the prefix of mean/std
        "train":
            {"content": None, "style3d": None, "style2d": None},  # will be named automatically as "train_content", etc.
        "test":
            {"content": "train", "style3d": "train", "style2d": "train"},
        "trainfull":
            {"content": "train", "style3d": "train", "style2d": "train"}
    }

    # input: T * 64
    rot_channels = 128  # added one more y-axis rotation
    pos3d_channels = 64  # changed to be the same as rfree
    proj_channels = 42

    num_channel = rot_channels
    num_style_joints = 21
    style_channel_2d = proj_channels
    style_channel_3d = pos3d_channels

    """
    encoder for class
    [down_n] stride=[enc_cl_stride], dim=[enc_cl_channels] convs,
    followed by [enc_cl_global_pool]
    """
    enc_cl_down_n = 2  # 64 -> 32 -> 16 -> 8 -> 4
    enc_cl_channels = [0, 96, 144]
    enc_cl_kernel_size = 8
    enc_cl_stride = 2

    """
    encoder for content
    [down_n] stride=[enc_co_stride], dim=[enc_co_channels] convs (with IN)
    followed by [enc_co_resblks] resblks with IN
    """
    enc_co_down_n = 1  # 64 -> 32 -> 16 -> 8
    enc_co_channels = [num_channel, 144]
    enc_co_kernel_size = 8
    enc_co_stride = 2
    enc_co_resblks = 1

    """
    mlp
    map from class output [enc_cl_channels[-1] * 1]
    to AdaIN params (dim calculated at runtime)
    """
    mlp_dims = [enc_cl_channels[-1], 192, 256]

    """
    decoder
    [dec_resblks] resblks with AdaIN
    [dec_up_n] Upsampling followed by stride=[dec_stride] convs
    """
    dec_bt_channel = 144
    dec_resblks = enc_co_resblks
    dec_channels = enc_co_channels.copy()
    dec_channels.reverse()
    dec_channels[-1] = 31 * 4  # Let it output rotations only
    dec_up_n = enc_co_down_n
    dec_kernel_size = 8
    dec_stride = 1

    """
    discriminator
    1) conv w/o acti or norm, keeps dims
    2) [disc_down_n] *
       (ActiFirstResBlk(channel[i], channel[i])
        + ActiFirstResBlk(channel[i], channel[i + 1])
        + AvgPool(pool_size, pool_stride))
    3) 2 ActiFirstResBlks that keep dims(channel[-1])
    4) conv, [channel[-1] -> num_classes]
    """
    disc_channels = [pos3d_channels, 96, 144]
    disc_down_n = 2  # 64 -> 32 -> 16 -> 8 -> 4
    disc_kernel_size = 6
    disc_stride = 1
    disc_pool_size = 3
    disc_pool_stride = 2
    num_classes = 8  # set to 16 for training on bfa data

    # loss weights
    gan_w = 1
    rec_w = 1
    rrec_w = 1
    feat_w = 0.5
    qt_w = 0.1
    joint_w = 0.3
    triplet_w = 0.3
    triplet_margin = 5
    twist_w = 1
    twist_alpha = 100

    trans_weight = 0.5

    device = None
    gpus = 1

    def initialize(self, args=None, save=True):
        """Resolve experiment directories, create them, pick the device,
        and (optionally) snapshot the config file into info_dir."""
        if hasattr(args, 'name') and args.name is not None:
            print("args.name= ", args.name)
            self.name = args.name
        if hasattr(args, 'batch_size') and args.name is not None:
            self.batch_size = args.batch_size
        self.main_dir = os.path.join(self.expr_dir, self.name)
        self.model_dir = os.path.join(self.main_dir, "pth")
        self.tb_dir = os.path.join(self.main_dir, "log")
        self.info_dir = os.path.join(self.main_dir, "info")
        self.output_dir = os.path.join(self.main_dir, "output")
        ensure_dirs([self.main_dir, self.model_dir, self.tb_dir, self.info_dir, self.output_dir, self.extra_data_dir])
        self.device = torch.device("cuda:%d" % self.cuda_id if torch.cuda.is_available() else "cpu")
        if save:
            # Keep a copy of the config file used for this run for reproducibility.
            self.config_name = args.config
            cfg_file = "%s.py" % self.config_name
            shutil.copy(pjoin(BASEPATH, cfg_file), os.path.join(self.info_dir, cfg_file))
def print_composite(data, beg=""):
    """Recursively print the *structure* of nested dicts/lists: sizes for
    containers, shapes for ndarray/Tensor leaves, repr for everything else."""
    deeper = beg + "  "
    if isinstance(data, dict):
        print(f'{beg} dict, size = {len(data)}')
        for key in data:
            print(f'  {beg}{key}:')
            print_composite(data[key], deeper)
    elif isinstance(data, list):
        print(f'{beg} list, len = {len(data)}')
        for idx, element in enumerate(data):
            print(f'  {beg}item {idx}')
            print_composite(element, deeper)
    elif isinstance(data, (np.ndarray, torch.Tensor)):
        print(f'{beg} array of size {data.shape}')
    else:
        print(f'{beg} {data}')
The provided code snippet includes necessary dependencies for implementing the `test_dataset` function. Write a Python function `def test_dataset(args)` to solve the following problem:
train_dataset = MotionNorm(config, "train") print_composite(train_dataset[0]) data_loader = DataLoader(train_dataset, batch_size=2, shuffle=False) for batch in data_loader: print_composite(batch) break
Here is the function:
def test_dataset(args):
    """Smoke-test single-BVH preprocessing under the current configuration."""
    cfg = Config()
    cfg.initialize(args)
    sample = process_single_bvh('data_proc/styletransfer/proud_03_001.bvh', cfg, to_batch=True)
    print_composite(sample)
    """
    train_dataset = MotionNorm(config, "train")
    print_composite(train_dataset[0])
    data_loader = DataLoader(train_dataset, batch_size=2, shuffle=False)
    for batch in data_loader:
        print_composite(batch)
        break
    """
16,044 | import os
import sys
import numpy as np
import yaml
import argparse
import shutil
from copy import deepcopy
from os.path import join as pjoin
from utils.animation_data import AnimationData
from utils.load_skeleton import Skel
def divide_clip_xia(input, window, window_step, divide):
    """Split one motion clip into fixed-size training windows.

    When *divide* is False the whole clip is returned as a single element,
    padded up to a multiple of 4 frames (at least 12). Otherwise overlapping
    windows of length *window* are extracted every *window_step* frames,
    starting a quarter-window before the clip and stopping once less than
    three quarters of a window of frames remains.
    """
    if not divide:  # return the whole clip
        t = ((input.shape[0]) // 4) * 4 + 4  # round the length up to a multiple of 4
        t = max(t, 12)
        if len(input) < t:
            input = pad_to_window(input, t)
        return [input]
    windows = []
    j = -(window // 4)  # start before frame 0 so the clip head is well covered
    total = len(input)
    while True:
        slice = input[max(j, 0): j + window].copy()  # remember to COPY!!
        if len(slice) < window:
            slice = pad_to_window(slice, window)
        windows.append(slice)
        j += window_step
        if total - j < (3 * window) // 4:
            break
    return windows
def process_file(filename, divider, window, window_step, downsample=4, skel=None, divide=True):
    """Load a BVH file as motion+phase features and split it with *divider*."""
    features = bvh_to_motion_and_phase(filename, downsample=downsample, skel=skel)  # [T, xxx]
    return divider(features, window=window, window_step=window_step, divide=divide)
def get_bvh_files(directory):
    """Return sorted full paths of all .bvh files in *directory*,
    excluding the reference pose file 'rest.bvh'."""
    paths = []
    for entry in sorted(os.listdir(directory)):
        full = os.path.join(directory, entry)
        if os.path.isfile(full) and entry.endswith('.bvh') and entry != 'rest.bvh':
            paths.append(full)
    return paths
def set_init(dic, key, value):
    """Set dic[key] = value only if *key* is not already present."""
    dic.setdefault(key, value)
def motion_and_phase_to_dict(fulls, style, meta):
    """
    fulls: a list of [T, xxx + 1] - motion and phase
    style: a *number*
    meta: a dict, e.g. {"style": "angry", "content": "walk"}

    Returns one dict per clip ({"motion", "style", "meta"}); the phase of
    the middle frame is recorded under "phase" in a copy of *meta*.
    """
    clips = []
    for full in fulls:
        motion = full[:, :-1]
        phase = full[:, -1]
        tagged_meta = deepcopy(meta)
        tagged_meta["phase"] = phase[len(phase) // 2]
        clips.append({
            "motion": motion,
            "style": style,
            "meta": tagged_meta
        })
    return clips
class Skel:
    """Skeleton description loaded from a YAML spec: bone offsets, parent
    topology, the subset of joints used by the model, and the indices of
    feet / hips / shoulders / head plus visualisation settings."""
    def __init__(self, filename=os.path.join(BASEPATH, "..", "style_transfer", "global_info", "skeleton_CMU.yml")):
        f = open(filename, "r")
        skel = yaml.load(f, Loader=yaml.Loader)
        # Reference rest-pose BVH, resolved relative to the YAML file.
        self.bvh_name = os.path.join(os.path.dirname(filename), skel['BVH'])
        self.rest_bvh = BVH.load(self.bvh_name)
        self.offset = np.array(skel['offsets'])        # per-joint bone offsets
        self.topology = np.array(skel['parents'])      # parent index per joint
        self.chosen_joints = np.array(skel['chosen_joints'])
        self.chosen_parents = np.array(skel['chosen_parents'])
        self.fid_l, self.fid_r = skel['left_foot'], skel['right_foot']
        self.hips, self.sdrs = skel['hips'], skel['shoulders']
        self.head = skel['head']
        self.visualization = skel['visualization']
def generate_database_xia(bvh_path, output_path, window, window_step, dataset_config='xia_dataset.yml'):
    """Build the Xia train/test/trainfull database from a folder of BVH clips.

    Writes <output_path>.npz (motion/style/meta per subset), a YAML .info
    file with per-style/per-content clip counts, and copies the held-out
    test BVH files into <output_path>_test/.
    """
    with open(dataset_config, "r") as f:
        cfg = yaml.load(f, Loader=yaml.Loader)
    # e.g. "walk_01" -> "walk"; the content index in a filename picks from this list
    content_namedict = [full_name.split('_')[0] for full_name in cfg["content_full_names"]]
    content_test_cnt = cfg["content_test_cnt"]
    content_names = cfg["content_names"]
    style_names = cfg["style_names"]
    style_name_to_idx = {name: i for i, name in enumerate(style_names)}
    skel = Skel()
    bvh_files = get_bvh_files(bvh_path)

    train_inputs = []
    test_inputs = []
    trainfull_inputs = []
    test_files = []
    test_cnt = {}  # indexed by content_style
    for i, item in enumerate(bvh_files):
        print('Processing %i of %i (%s)' % (i, len(bvh_files), item))
        filename = item.split('/')[-1]
        # filenames follow "<style>_<content index>_<take>.bvh"
        style, content_idx, _ = filename.split('_')
        content = content_namedict[int(content_idx) - 1]
        content_style = "%s_%s" % (content, style)
        # undivided clip (whole take), used for test / trainfull subsets
        uclip = motion_and_phase_to_dict(process_file(item, divider=divide_clip_xia, window=window, window_step=window_step,
                                                      skel=skel, divide=False),
                                         style_name_to_idx[style],
                                         {"style": style, "content": content})
        # Whether this should be a test clip
        set_init(test_cnt, content_style, 0)
        if test_cnt[content_style] < content_test_cnt[content]:
            test_cnt[content_style] += 1
            test_inputs += uclip
            test_files.append(filename)
        else:
            trainfull_inputs += uclip
            # NOTE(review): training windows are only generated for non-test
            # takes here (no leakage of test data) — confirm intended.
            clips = motion_and_phase_to_dict(process_file(item, divider=divide_clip_xia, window=window, window_step=window_step,
                                                          skel=skel, divide=True),
                                             style_name_to_idx[style],
                                             {"style": style, "content": content})
            train_inputs += clips

    data_dict = {}
    data_info = {}
    for subset, inputs in zip(["train", "test", "trainfull"], [train_inputs, test_inputs, trainfull_inputs]):
        motions = [input["motion"] for input in inputs]
        styles = [input["style"] for input in inputs]
        meta = {key: [input["meta"][key] for input in inputs] for key in inputs[0]["meta"].keys()}
        data_dict[subset] = {"motion": motions, "style": styles, "meta": meta}

        """compute meta info"""
        num_clips = len(motions)
        info = {"num_clips": num_clips,
                "distribution":
                    {style:
                         {content: len([i for i in range(num_clips) if meta["style"][i] == style and meta["content"][i] == content])
                          for content in content_names}
                     for style in style_names}
                }
        data_info[subset] = info

    np.savez_compressed(output_path + ".npz", **data_dict)
    info_file = output_path + ".info"
    data_info["test_files"] = test_files
    with open(info_file, "w") as f:
        yaml.dump(data_info, f, sort_keys=False)

    # Keep a copy of the raw held-out test clips next to the database.
    test_folder = output_path + "_test"
    if not os.path.exists(test_folder):
        os.makedirs(test_folder)
    for file in test_files:
        shutil.copy(pjoin(bvh_path, file), pjoin(test_folder, file))
16,045 | import os
import sys
import numpy as np
import yaml
import argparse
import shutil
from copy import deepcopy
from os.path import join as pjoin
from utils.animation_data import AnimationData
from utils.load_skeleton import Skel
def bvh_to_motion_and_phase(filename, downsample, skel):
def divide_clip_bfa(input, window, window_step, divide):
def get_bvh_files(directory):
def motion_and_phase_to_dict(fulls, style, meta):
class Skel:
def __init__(self, filename=os.path.join(BASEPATH, "..", "style_transfer", "global_info", "skeleton_CMU.yml")):
def generate_database_bfa(bvh_path, output_path, window, window_step, downsample=4, dataset_config='bfa_dataset.yml'):
    """Build the BFA train/test/trainfull database from long BVH recordings.

    Each recording is processed in groups of ``group_size`` double-windows;
    the last double-window of every full group is held out for test, the
    rest is windowed for train (length *window*) and trainfull (length
    ``2 * window``). Writes <output_path>.npz and a YAML .info file with
    per-style clip counts.
    """
    with open(dataset_config, "r") as f:
        cfg = yaml.load(f, Loader=yaml.Loader)
    style_names = cfg["style_names"]
    style_name_to_idx = {name: i for i, name in enumerate(style_names)}
    skel = Skel()
    bvh_files = get_bvh_files(bvh_path)

    train_inputs = []
    test_inputs = []
    trainfull_inputs = []
    group_size = 10  # pick the last clip from every group_size clips for test
    test_window = window * 2
    for i, item in enumerate(bvh_files):
        print('Processing %i of %i (%s)' % (i, len(bvh_files), item))
        filename = item.split('/')[-1]
        # filenames follow "<style>_<take>.bvh"
        style, _ = filename.split('_')
        style_idx = style_name_to_idx[style]
        raw = bvh_to_motion_and_phase(item, downsample=downsample, skel=skel)  # [T, xxx]
        total_length = len(raw)
        group_length = test_window * group_size
        for st in range(0, total_length, group_length):
            ed = st + group_length
            # NOTE(review): trailing partial groups appear to be skipped
            # entirely — confirm this is intended.
            if ed <= total_length:
                test_clips = motion_and_phase_to_dict([raw[ed - test_window: ed]], style_idx, {"style": style})
                test_inputs += test_clips
                train_clips = motion_and_phase_to_dict(divide_clip_bfa(raw[st: ed - test_window],
                                                                       window=window, window_step=window_step, divide=True),
                                                       style_idx, {"style": style})
                trainfull_clips = motion_and_phase_to_dict(divide_clip_bfa(raw[st: ed - test_window],
                                                                           window=test_window, window_step=test_window, divide=True),
                                                           style_idx, {"style": style})
                train_inputs += train_clips
                trainfull_inputs += trainfull_clips

    data_dict = {}
    data_info = {}
    for subset, inputs in zip(["train", "test", "trainfull"], [train_inputs, test_inputs, trainfull_inputs]):
        motions = [input["motion"] for input in inputs]
        styles = [input["style"] for input in inputs]
        meta = {key: [input["meta"][key] for input in inputs] for key in inputs[0]["meta"].keys()}
        data_dict[subset] = {"motion": motions, "style": styles, "meta": meta}

        """compute meta info"""
        num_clips = len(motions)
        info = {"num_clips": num_clips,
                "distribution":
                    {style: len([i for i in range(num_clips) if meta["style"][i] == style])
                     for style in style_names}
                }
        data_info[subset] = info

    np.savez_compressed(output_path + ".npz", **data_dict)
    info_file = output_path + ".info"
    with open(info_file, "w") as f:
        yaml.dump(data_info, f, sort_keys=False)
16,046 | import os
import sys
import numpy as np
import yaml
import argparse
import shutil
from copy import deepcopy
from os.path import join as pjoin
from utils.animation_data import AnimationData
from utils.load_skeleton import Skel
def parse_args():
    """Parse command-line options for exporting the xia training data."""
    parser = argparse.ArgumentParser("export_train")
    # (flag, type, default) triples for every supported option
    for flag, kind, default in (
        ("--dataset", str, "xia"),
        ("--bvh_path", str, "styletransfer"),
        ("--output_path", str, "xia_data"),
        ("--window", int, 48),
        ("--window_step", int, 8),
        ("--dataset_config", str, "../global_info/xia_dataset.yml"),
    ):
        parser.add_argument(flag, type=kind, default=default)
    return parser.parse_args()
16,047 | import os
import sys
import numpy as np
import torch
import argparse
from tqdm import tqdm
from os.path import join as pjoin
import utils.BVH as BVH
from utils.InverseKinematics import JacobianInverseKinematics
from utils.animation_data import AnimationData
def parse_args():
    """Return the parsed command line; only a ``--data`` option is accepted."""
    ap = argparse.ArgumentParser()
    ap.add_argument("--data", type=str, default="bla_3d")
    return ap.parse_args()
16,048 | import os
import sys
import numpy as np
import torch
import argparse
from tqdm import tqdm
from os.path import join as pjoin
import utils.BVH as BVH
from utils.InverseKinematics import JacobianInverseKinematics
from utils.animation_data import AnimationData
def save_bvh_from_network_output(nrot, output_path):
    """Decode a network rotation output and write it to *output_path* as BVH.

    Args:
        nrot: network-format motion data accepted by
            ``AnimationData.from_network_output``.
        output_path: destination ``.bvh`` file; missing parent directories
            are created on demand.
    """
    anim = AnimationData.from_network_output(nrot)
    bvh, names, ftime = anim.get_BVH()
    out_dir = os.path.dirname(output_path)
    # makedirs(exist_ok=True) replaces the racy exists()/makedirs pair; the
    # guard also avoids makedirs('') failing when output_path has no dir part.
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    BVH.save(output_path, bvh, names, ftime)
def remove_fs(anim, foot, output_path, fid_l=(4, 5), fid_r=(9, 10), interp_length=5, force_on_floor=True):
    """Remove foot sliding from a motion and save the cleaned BVH.

    Args:
        anim: network-format motion, decoded via ``nrot2anim`` (defined
            elsewhere in this file).
        foot: per-foot-joint contact labels; indexed as ``foot[i][t]`` with
            1 = planted, 0 = free — presumably shape [len(fid), T]; confirm
            against the caller.
        output_path: destination ``.bvh`` file.
        fid_l, fid_r: global joint indices of the left/right foot joints.
        interp_length: half-window (frames) used to blend positions between
            fixed and free segments.
        force_on_floor: pin planted feet to height 0 instead of their mean.
    """
    (anim, names, ftime), glb = nrot2anim(anim)
    T = len(glb)
    fid = list(fid_l) + list(fid_r)
    fid_l, fid_r = np.array(fid_l), np.array(fid_r)
    foot_heights = np.minimum(glb[:, fid_l, 1],
                              glb[:, fid_r, 1]).min(axis=1)  # [T, 2] -> [T]
    # print(np.min(foot_heights))
    # soft minimum over time gives a robust floor estimate
    floor_height = softmin(foot_heights, softness=0.5, axis=0)
    # print(floor_height)
    # shift the whole motion so the estimated floor sits at y = 0
    glb[:, :, 1] -= floor_height
    anim.positions[:, 0, 1] -= floor_height
    for i, fidx in enumerate(fid):
        fixed = foot[i]  # [T]

        """
        for t in range(T):
            glb[t, fidx][1] = max(glb[t, fidx][1], 0.25)
        """

        # Pass 1: pin every contiguous run of planted frames to the run's
        # average position (optionally forced onto the floor).
        s = 0
        while s < T:
            while s < T and fixed[s] == 0:
                s += 1
            if s >= T:
                break
            t = s
            avg = glb[t, fidx].copy()
            while t + 1 < T and fixed[t + 1] == 1:
                t += 1
                avg += glb[t, fidx].copy()
            avg /= (t - s + 1)

            if force_on_floor:
                avg[1] = 0.0

            for j in range(s, t + 1):
                glb[j, fidx] = avg.copy()

            # print(fixed[s - 1:t + 2])

            s = t + 1

        # Pass 2: blend free frames near a fixed segment toward the pinned
        # position using `lerp`/`alpha` (defined elsewhere in this file).
        for s in range(T):
            if fixed[s] == 1:
                continue
            l, r = None, None
            consl, consr = False, False
            for k in range(interp_length):
                if s - k - 1 < 0:
                    break
                if fixed[s - k - 1]:
                    l = s - k - 1
                    consl = True
                    break
            for k in range(interp_length):
                if s + k + 1 >= T:
                    break
                if fixed[s + k + 1]:
                    r = s + k + 1
                    consr = True
                    break
            if not consl and not consr:
                continue
            if consl and consr:
                # constrained on both sides: blend both ways, then mix by
                # relative distance to each anchor
                litp = lerp(alpha(1.0 * (s - l + 1) / (interp_length + 1)),
                            glb[s, fidx], glb[l, fidx])
                ritp = lerp(alpha(1.0 * (r - s + 1) / (interp_length + 1)),
                            glb[s, fidx], glb[r, fidx])
                itp = lerp(alpha(1.0 * (s - l + 1) / (r - l + 1)),
                           ritp, litp)
                glb[s, fidx] = itp.copy()
                continue
            if consl:
                litp = lerp(alpha(1.0 * (s - l + 1) / (interp_length + 1)),
                            glb[s, fidx], glb[l, fidx])
                glb[s, fidx] = litp.copy()
                continue
            if consr:
                ritp = lerp(alpha(1.0 * (r - s + 1) / (interp_length + 1)),
                            glb[s, fidx], glb[r, fidx])
                glb[s, fidx] = ritp.copy()
    # Solve IK so joint rotations reproduce the edited global positions.
    targetmap = {}
    for j in range(glb.shape[1]):
        targetmap[j] = glb[:, j]
    ik = JacobianInverseKinematics(anim, targetmap, iterations=10, damping=4.0,
                                   silent=False)
    ik()
    # NOTE(review): exists()/makedirs is race-prone and fails for a bare
    # filename (dirname == ''); consider os.makedirs(..., exist_ok=True).
    if not os.path.exists(os.path.dirname(output_path)):
        os.makedirs(os.path.dirname(output_path))
    BVH.save(output_path, anim, names, ftime)
def process_data(filename, style_and_content=True, output_dir=None, selected=None):
    """Export BVH files for the clips stored in a saved result tensor file.

    Writes raw_%02d.bvh and foot-contact-cleaned after_%02d.bvh for every
    selected clip, plus style_/content_ references when requested.
    """
    data = torch.load(filename, map_location="cpu")
    feet = data["foot_contact"]
    styles = data["style"]
    contents = data["content"]
    motions = data["trans"]
    indices = range(len(motions)) if selected is None else selected
    for num in tqdm(indices):
        foot = feet[num].detach().numpy()
        if style_and_content:
            save_bvh_from_network_output(styles[num].detach().numpy().copy(),
                                         output_path=pjoin(output_dir, "style_%02d.bvh" % num))
            save_bvh_from_network_output(contents[num].detach().numpy().copy(),
                                         output_path=pjoin(output_dir, "content_%02d.bvh" % num))
        motion = motions[num].detach().numpy()
        save_bvh_from_network_output(motion, output_path=pjoin(output_dir, "raw_%02d.bvh" % num))
        remove_fs(motion, foot, output_path=pjoin(output_dir, "after_%02d.bvh" % num))
16,049 | import os
import numpy as np
import torch
def merge_dict(dict_list):
    """Sum a sequence of dicts key-wise.

    Args:
        dict_list: iterable of mappings whose values support ``+``.

    Returns:
        dict mapping each key to the sum of its values over all inputs;
        keys missing from a dict simply contribute nothing.
    """
    totals = {}
    for d in dict_list:
        for key, value in d.items():
            # dict.get replaces the original try/except-KeyError init dance
            totals[key] = totals.get(key, 0.0) + value
    return totals
16,050 | import os
import numpy as np
import torch
def update_dict(old_dict, new_dict):
    """Copy every key/value pair of *new_dict* into *old_dict* in place."""
    # idiomatic replacement for the manual per-key assignment loop
    old_dict.update(new_dict)
16,051 | import os
import numpy as np
import torch
def write_loss(iterations, trainer, train_writer):
    """Log every entry of ``trainer.loss_dict`` to the summary writer.

    Scalars are recorded at step ``iterations + 1``.
    """
    step = iterations + 1
    for tag, scalar in trainer.loss_dict.items():
        train_writer.add_scalar(tag, scalar, step)
16,052 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import autograd
import numpy as np
from kinematics import ForwardKinematics
from blocks import ConvBlock, ResBlock, LinearBlock, \
BottleNeckResBlock, Upsample, ConvLayers, ActiFirstResBlock, \
get_conv_pad, get_norm_layer
def assign_adain_params(adain_params, model):
    """Distribute AdaIN (mean, std) parameters over the model's AdaIN layers.

    Each AdaptiveInstanceNorm1d consumes the first ``2 * num_features``
    columns of *adain_params* (mean -> bias, std -> weight); the remaining
    columns are handed to the next AdaIN layer in module order.
    """
    for layer in model.modules():
        if layer.__class__.__name__ != "AdaptiveInstanceNorm1d":
            continue
        n = layer.num_features
        layer.bias = adain_params[:, :n].contiguous().view(-1)
        layer.weight = adain_params[:, n:2 * n].contiguous().view(-1)
        if adain_params.size(1) > 2 * n:
            adain_params = adain_params[:, 2 * n:]
16,053 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import autograd
import numpy as np
from kinematics import ForwardKinematics
from blocks import ConvBlock, ResBlock, LinearBlock, \
BottleNeckResBlock, Upsample, ConvLayers, ActiFirstResBlock, \
get_conv_pad, get_norm_layer
def get_num_adain_params(model):
    """Count the AdaIN parameters *model* expects (2 per feature per layer)."""
    return sum(2 * m.num_features
               for m in model.modules()
               if m.__class__.__name__ == "AdaptiveInstanceNorm1d")
16,054 | import torch
import torch.nn as nn
import torch.nn.functional as F
def get_conv_pad(kernel_size, stride, padding=nn.ReflectionPad1d):
    """Build a padding layer that keeps length under a (kernel, stride) conv.

    The total pad ``kernel_size - stride`` is split left/right, with the
    extra element (odd totals) going to the right.
    """
    total = kernel_size - stride
    left = total // 2
    return padding((left, total - left))
16,055 | import torch
import torch.nn as nn
import torch.nn.functional as F
def ConvLayers(kernel_size, in_channels, out_channels, stride=1, pad_type='reflect', use_bias=True):
    """
    returns a list of [pad, conv] => should be += to some list, then apply sequential
    """
    # resolve the padding class lazily so 'zero' (project-local ZeroPad1d)
    # is only referenced when actually requested
    if pad_type == 'reflect':
        pad_cls = nn.ReflectionPad1d
    elif pad_type == 'replicate':
        pad_cls = nn.ReplicationPad1d
    elif pad_type == 'zero':
        pad_cls = ZeroPad1d
    else:
        assert 0, "Unsupported padding type: {}".format(pad_type)

    left = (kernel_size - 1) // 2
    right = kernel_size - 1 - left
    conv = nn.Conv1d(in_channels, out_channels,
                     kernel_size=kernel_size,
                     stride=stride, bias=use_bias)
    return [pad_cls((left, right)), conv]
def get_acti_layer(acti='relu', inplace=True):
    """Map an activation name to a (possibly empty) one-element layer list."""
    if acti == 'none':
        return []
    if acti == 'relu':
        return [nn.ReLU(inplace=inplace)]
    if acti == 'lrelu':
        return [nn.LeakyReLU(0.2, inplace=inplace)]
    if acti == 'tanh':
        return [nn.Tanh()]
    assert 0, "Unsupported activation: {}".format(acti)
def get_norm_layer(norm='none', norm_dim=None):
    """Map a normalization name to a (possibly empty) one-element layer list."""
    if norm == 'none':
        return []
    if norm == 'bn':
        return [nn.BatchNorm1d(norm_dim)]
    if norm == 'in':
        # the affine=False variant was used for rt42
        return [nn.InstanceNorm1d(norm_dim, affine=True)]
    if norm == 'adain':
        # AdaptiveInstanceNorm1d is a project-local layer
        return [AdaptiveInstanceNorm1d(norm_dim)]
    assert 0, "Unsupported normalization: {}".format(norm)
def get_dropout_layer(dropout=None):
    """Return ``[nn.Dropout(p=dropout)]``, or ``[]`` when dropout is None."""
    return [] if dropout is None else [nn.Dropout(p=dropout)]
The provided code snippet includes necessary dependencies for implementing the `ConvBlock` function. Write a Python function `def ConvBlock(kernel_size, in_channels, out_channels, stride=1, pad_type='reflect', dropout=None, norm='none', acti='lrelu', acti_first=False, use_bias=True, inplace=True)` to solve the following problem:
returns a list of [pad, conv, norm, acti] or [acti, pad, conv, norm]
Here is the function:
def ConvBlock(kernel_size, in_channels, out_channels, stride=1, pad_type='reflect', dropout=None,
              norm='none', acti='lrelu', acti_first=False, use_bias=True, inplace=True):
    """
    returns a list of [pad, conv, norm, acti] or [acti, pad, conv, norm]
    """
    body = ConvLayers(kernel_size, in_channels, out_channels,
                      stride=stride, pad_type=pad_type, use_bias=use_bias)
    body = body + get_dropout_layer(dropout) + get_norm_layer(norm, norm_dim=out_channels)
    activation = get_acti_layer(acti, inplace=inplace)
    # acti_first places the activation before the conv stack (pre-activation)
    return activation + body if acti_first else body + activation
16,056 | import torch
import torch.nn as nn
import torch.nn.functional as F
def get_acti_layer(acti='relu', inplace=True):
    """Return a one-element list with the named activation layer ([] for 'none')."""
    if acti == 'relu':
        return [nn.ReLU(inplace=inplace)]
    elif acti == 'lrelu':
        # slope 0.2 is the conventional GAN LeakyReLU setting
        return [nn.LeakyReLU(0.2, inplace=inplace)]
    elif acti == 'tanh':
        return [nn.Tanh()]
    elif acti == 'none':
        return []
    else:
        assert 0, "Unsupported activation: {}".format(acti)
def get_norm_layer(norm='none', norm_dim=None):
    """Return a one-element list with the named 1-D normalization layer ([] for 'none')."""
    if norm == 'bn':
        return [nn.BatchNorm1d(norm_dim)]
    elif norm == 'in':
        # return [nn.InstanceNorm1d(norm_dim, affine=False)] # for rt42!
        return [nn.InstanceNorm1d(norm_dim, affine=True)]
    elif norm == 'adain':
        # AdaptiveInstanceNorm1d is a project-local layer defined elsewhere
        return [AdaptiveInstanceNorm1d(norm_dim)]
    elif norm == 'none':
        return []
    else:
        assert 0, "Unsupported normalization: {}".format(norm)
def get_dropout_layer(dropout=None):
    """Return ``[nn.Dropout(p=dropout)]``, or ``[]`` when dropout is None."""
    if dropout is not None:
        return [nn.Dropout(p=dropout)]
    else:
        return []
def LinearBlock(in_dim, out_dim, dropout=None, norm='none', acti='relu'):
    """Return a flat ``[linear, dropout?, norm?, acti?]`` layer list."""
    block = [nn.Linear(in_dim, out_dim, bias=True)]
    block += get_dropout_layer(dropout)
    block += get_norm_layer(norm, norm_dim=out_dim)
    block += get_acti_layer(acti)
    return block
16,057 | import sys
import os
from option_parser import get_std_bvh
import BVH as BVH
import numpy as np
from datasets.bvh_parser import BVH_file
import Animation
def batch(char, suffix, prefix):
    """Mean height-normalized squared position error for one character.

    Compares every ``*_{suffix}.bvh`` result under
    ``{prefix}/results/bvh/{char}`` against its ``*_gt.bvh`` counterpart,
    skipping ground-truth, 'fix', and input files.

    Returns:
        mean over files of mean squared joint-position error, divided by
        the reference skeleton height squared.
    """
    input_path = os.path.join(prefix, 'results/bvh')
    all_err = []
    # reference skeleton only supplies the height used for normalization
    ref_file = get_std_bvh(dataset=char)
    ref_file = BVH_file(ref_file)
    height = ref_file.get_height()
    test_num = 0

    new_p = os.path.join(input_path, char)
    files = [f for f in os.listdir(new_p) if
             f.endswith('_{}.bvh'.format(suffix)) and not f.endswith('_gt.bvh') and 'fix' not in f and not f.endswith('_input.bvh')]
    for file in files:
        file_full = os.path.join(new_p, file)
        anim, names, _ = BVH.load(file_full)
        test_num += 1
        # virtual joints are helpers added by the pipeline; exclude them
        index = []
        for i, name in enumerate(names):
            if 'virtual' in name:
                continue
            index.append(i)
        # '_gt.bvh' replaces the trailing '_N.bvh' of the result filename
        # (assumes a single-character suffix — TODO confirm)
        file_ref = file_full[:-6] + '_gt.bvh'
        anim_ref, _, _ = BVH.load(file_ref)
        pos = Animation.positions_global(anim)  # [T, J, 3]
        pos_ref = Animation.positions_global(anim_ref)

        pos = pos[:, index, :]
        pos_ref = pos_ref[:, index, :]

        err = (pos - pos_ref) * (pos - pos_ref)
        # normalize by squared skeleton height so characters are comparable
        err /= height ** 2

        err = np.mean(err)
        all_err.append(err)

    all_err = np.array(all_err)
    return all_err.mean()
def full_batch(suffix, prefix):
    """Run :func:`batch` for every test character and collect the mean errors."""
    chars = ['Mousey_m', 'Goblin_m', 'Mremireh_m', 'Vampire_m']
    return [batch(char, suffix, prefix) for char in chars]
16,058 | import os
import torch
from models import create_model
from datasets import create_dataset
import option_parser
def eval_prepare(args):
    """Derive (character groups, file ids, source group index) for evaluation.

    Character names are taken from the second-to-last path component of
    ``args.input_bvh`` / ``args.target_bvh``. The grouping depends on
    ``args.test_type`` ('intra' or 'cross') and on whether the source
    character belongs to group B (name ends with '_m').

    Raises:
        Exception: for an unknown ``args.test_type``.
    """
    src_name = args.input_bvh.split('/')[-2]
    dst_name = args.target_bvh.split('/')[-2]
    if args.test_type == 'intra':
        if src_name.endswith('_m'):
            character = [['BigVegas', 'BigVegas'], [src_name, dst_name]]
            file_id = [[0, 0], [args.input_bvh, args.input_bvh]]
            src_id = 1
        else:
            character = [[src_name, dst_name], ['Goblin_m', 'Goblin_m']]
            file_id = [[args.input_bvh, args.input_bvh], [0, 0]]
            src_id = 0
    elif args.test_type == 'cross':
        if src_name.endswith('_m'):
            character = [[dst_name], [src_name]]
            file_id = [[0], [args.input_bvh]]
            src_id = 1
        else:
            character = [[src_name], [dst_name]]
            file_id = [[args.input_bvh], [0]]
            src_id = 0
    else:
        raise Exception('Unknown test type')
    return character, file_id, src_id
16,059 | import os
import torch
from models import create_model
from datasets import create_dataset
import option_parser
def recover_space(file):
    """Undo the underscore-for-space substitution in the last path component."""
    head, sep, tail = file.rpartition('/')
    return head + sep + tail.replace('_', ' ')
16,060 | import os
from datasets.bvh_parser import BVH_file
from datasets.bvh_writer import BVH_writer
from models.IK import fix_foot_contact
from os.path import join as pjoin
def copy_ref_file(src, dst):
    """Re-export a BVH reference file through the project's parser/writer.

    Loads *src* as a simplified-skeleton BVH and writes it to *dst* in
    quaternion format; ``[..., ::2]`` keeps every other frame
    (presumably a 2x temporal downsample — TODO confirm against the writer).
    """
    file = BVH_file(src)
    writer = BVH_writer(file.edges, file.names)
    writer.write_raw(file.to_tensor(quater=True)[..., ::2], 'quaternion', dst)
def get_height(file):
    """Return the skeleton height of the given BVH file."""
    return BVH_file(file).get_height()
def fix_foot_contact(input_file, foot_file, output_file, ref_height):
    """Fix foot sliding in *input_file* using contacts derived from *foot_file*.

    Plants each foot joint during contact frames, blends nearby free frames,
    then runs 50 IK iterations so the rotations match the edited positions,
    and saves the result to *output_file*.

    Args:
        input_file: BVH motion to fix.
        foot_file: BVH used by ``get_foot_contact`` to detect contacts.
        output_file: destination BVH path.
        ref_height: skeleton height passed to the contact detector.
    """
    anim, name, ftime = BVH.load(input_file)
    fid = get_ee_id_by_names(name)
    contact = get_foot_contact(foot_file, ref_height)

    glb = Animation.positions_global(anim)  # [T, J, 3]

    T = glb.shape[0]

    for i, fidx in enumerate(fid):  # fidx: index of the foot joint
        fixed = contact[:, i]  # [T]

        # Pass 1: pin each contiguous contact run to its average position.
        s = 0
        while s < T:
            while s < T and fixed[s] == 0:
                s += 1
            if s >= T:
                break
            t = s
            avg = glb[t, fidx].copy()
            while t + 1 < T and fixed[t + 1] == 1:
                t += 1
                avg += glb[t, fidx].copy()
            avg /= (t - s + 1)

            for j in range(s, t + 1):
                glb[j, fidx] = avg.copy()
            s = t + 1

        # Pass 2: blend free frames within L of a planted segment toward it.
        # L, lerp and alpha are module-level helpers defined elsewhere in
        # this file (L presumably the interpolation half-window — confirm).
        for s in range(T):
            if fixed[s] == 1:
                continue
            l, r = None, None
            consl, consr = False, False
            for k in range(L):
                if s - k - 1 < 0:
                    break
                if fixed[s - k - 1]:
                    l = s - k - 1
                    consl = True
                    break
            for k in range(L):
                if s + k + 1 >= T:
                    break
                if fixed[s + k + 1]:
                    r = s + k + 1
                    consr = True
                    break
            if not consl and not consr:
                continue
            if consl and consr:
                # constrained on both sides: blend each way, mix by distance
                litp = lerp(alpha(1.0 * (s - l + 1) / (L + 1)),
                            glb[s, fidx], glb[l, fidx])
                ritp = lerp(alpha(1.0 * (r - s + 1) / (L + 1)),
                            glb[s, fidx], glb[r, fidx])
                itp = lerp(alpha(1.0 * (s - l + 1) / (r - l + 1)),
                           ritp, litp)
                glb[s, fidx] = itp.copy()
                continue
            if consl:
                litp = lerp(alpha(1.0 * (s - l + 1) / (L + 1)),
                            glb[s, fidx], glb[l, fidx])
                glb[s, fidx] = litp.copy()
                continue
            if consr:
                ritp = lerp(alpha(1.0 * (r - s + 1) / (L + 1)),
                            glb[s, fidx], glb[r, fidx])
                glb[s, fidx] = ritp.copy()

    # glb is ready
    anim = anim.copy()

    rot = torch.tensor(anim.rotations.qs, dtype=torch.float)
    pos = torch.tensor(anim.positions[:, 0, :], dtype=torch.float)
    offset = torch.tensor(anim.offsets, dtype=torch.float)

    glb = torch.tensor(glb, dtype=torch.float)

    ik_solver = InverseKinematics(rot, pos, offset, anim.parents, glb)

    print('Fixing foot contact using IK...')
    for i in tqdm(range(50)):
        ik_solver.step()

    rotations = ik_solver.rotations.detach()
    # re-normalize so the optimized quaternions stay unit-length
    norm = torch.norm(rotations, dim=-1, keepdim=True)
    rotations /= norm

    anim.rotations = Quaternions(rotations.numpy())
    anim.positions[:, 0, :] = ik_solver.position.detach().numpy()

    BVH.save(output_file, anim, name, ftime)
def example(src_name, dest_name, bvh_name, test_type, output_path):
    """Retarget one clip from *src_name* to *dest_name* and fix foot contacts.

    Copies input/gt references into *output_path*, runs eval_single_pair.py
    as a subprocess to produce result.bvh, then applies the foot-contact IK
    fix in place.
    """
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    input_file = './datasets/Mixamo/{}/{}'.format(src_name, bvh_name)
    ref_file = './datasets/Mixamo/{}/{}'.format(dest_name, bvh_name)
    copy_ref_file(input_file, pjoin(output_path, 'input.bvh'))
    copy_ref_file(ref_file, pjoin(output_path, 'gt.bvh'))
    height = get_height(input_file)
    # the eval script expects underscores instead of spaces in filenames
    bvh_name = bvh_name.replace(' ', '_')
    input_file = './datasets/Mixamo/{}/{}'.format(src_name, bvh_name)
    ref_file = './datasets/Mixamo/{}/{}'.format(dest_name, bvh_name)
    # NOTE(review): os.system on an interpolated string is vulnerable to
    # shell injection and breaks on paths with spaces/metacharacters —
    # consider subprocess.run([...], shell=False) with an argument list.
    cmd = 'python eval_single_pair.py --input_bvh={} --target_bvh={} --output_filename={} --test_type={}'.format(
        input_file, ref_file, pjoin(output_path, 'result.bvh'), test_type
    )
    os.system(cmd)
    fix_foot_contact(pjoin(output_path, 'result.bvh'),
                     pjoin(output_path, 'input.bvh'),
                     pjoin(output_path, 'result.bvh'),
                     height)
16,061 | import os
from models import create_model
from datasets import create_dataset, get_character_names
import option_parser
import torch
from tqdm import tqdm
def create_model(args, character_names, dataset):
    """Instantiate the GAN model selected by ``args.model``.

    Raises:
        Exception: if ``args.model`` is not 'mul_top_mul_ske'.
    """
    if args.model != 'mul_top_mul_ske':
        raise Exception('Unimplemented model')
    args.skeleton_info = 'concat'
    import models.architecture
    return models.architecture.GAN_model(args, character_names, dataset)
def get_character_names(args):
    """Return the two character groups used for training or evaluation.

    Args:
        args: parsed options; reads ``is_train`` and, for evaluation,
            ``eval_seq``.

    Returns:
        list of two name lists (subdirectories of retargeting/datasets/Mixamo).
    """
    if args.is_train:
        # Subdirectories in retargeting/datasets/Mixamo, laid out as
        # [[names of group A], [names of group B]].
        return [['Aj', 'BigVegas', 'Kaya', 'SportyGranny'],
                ['Malcolm_m', 'Remy_m', 'Maria_m', 'Jasper_m', 'Knight_m',
                 'Liam_m', 'ParasiteLStarkie_m', 'Pearl_m', 'Michelle_m', 'LolaB_m',
                 'Pumpkinhulk_m', 'Ortiz_m', 'Paladin_m', 'James_m', 'Joe_m',
                 'Olivia_m', 'Yaku_m', 'Timmy_m', 'Racer_m', 'Abe_m']]

    # Evaluation: both groups must be the same length (repeats are fine);
    # the character selected by eval_seq is rotated to the front of group B.
    characters = [['BigVegas', 'BigVegas', 'BigVegas', 'BigVegas'],
                  ['Mousey_m', 'Goblin_m', 'Mremireh_m', 'Vampire_m']]
    group_b = characters[1]
    group_b[0], group_b[args.eval_seq] = group_b[args.eval_seq], group_b[0]
    return characters
def create_dataset(args, character_names=None):
    """Build the training (MixedData) or evaluation (TestData) dataset."""
    from datasets.combined_motion import TestData, MixedData
    dataset_cls = MixedData if args.is_train else TestData
    return dataset_cls(args, character_names)
def eval(eval_seq, save_dir, test_device='cpu'):
    """Run retargeting evaluation for one character permutation.

    Re-parses the training arguments stored in ``{save_dir}/para.txt``,
    reconfigures them for evaluation, loads the epoch-20000 checkpoint and
    runs the model over the whole test dataset (outputs are written by
    ``model.test()``). NOTE: shadows the builtin ``eval``.

    Args:
        eval_seq: which group-B character to move to the front (see
            ``get_character_names``).
        save_dir: training output directory containing para.txt and weights.
        test_device: CUDA device string; falls back to 'cpu' when CUDA is
            unavailable.
    """
    para_path = os.path.join(save_dir, 'para.txt')
    with open(para_path, 'r') as para_file:
        # para.txt's first line is "<prog> <argv...>"; drop the program name
        argv_ = para_file.readline().split()[1:]
        args = option_parser.get_parser().parse_args(argv_)
    args.cuda_device = test_device if torch.cuda.is_available() else 'cpu'
    args.is_train = False
    args.rotation = 'quaternion'
    args.eval_seq = eval_seq
    args.save_dir = save_dir
    character_names = get_character_names(args)

    dataset = create_dataset(args, character_names)

    model = create_model(args, character_names, dataset)
    model.load(epoch=20000)

    for i, motions in tqdm(enumerate(dataset), total=len(dataset)):
        model.set_input(motions)
        model.test()
16,062 | import sys
import os
from option_parser import try_mkdir
import numpy as np
from tqdm import tqdm
from datasets.bvh_parser import BVH_file
import BVH_mod as BVH
def split_joint(file_name, save_file=None):
    """Insert '_split' child joints after Spine1 and both shoulders.

    Each target joint's offset is halved and a zero-rotation '_split' joint
    is inserted between it and its children; the spine offsets are then
    redistributed evenly. The result is written to *save_file* (defaults to
    overwriting *file_name*).
    """
    if save_file is None:
        save_file = file_name
    target_joints = ['Spine1', 'LeftShoulder', 'RightShoulder']
    target_idx = [-1] * len(target_joints)

    anim, names, ftime = BVH.load(file_name)
    n_joint = len(anim.parents)

    # strip namespace prefixes (e.g. "mixamorig:Spine1") and locate targets
    for i, name in enumerate(names):
        if ':' in name:
            name = name[name.find(':') + 1:]
            names[i] = name
        for j, joint in enumerate(target_joints):
            if joint == names[i]:
                target_idx[j] = i

    new_anim = anim.copy()
    new_anim.offsets = []
    new_anim.parents = []
    new_anim.rotations = []
    new_names = []

    target_idx.sort()

    # new_id maps old joint index -> new index after insertions; bias counts
    # how many split joints precede a given old index
    bias = 0
    new_id = {-1: -1}
    target_idx.append(-1)  # sentinel so target_idx[bias] is always valid
    for i in range(n_joint):
        new_id[i] = i + bias
        if i == target_idx[bias]: bias += 1

    # one-joint identity rotation track reused for every inserted joint
    identity = np.zeros_like(anim.rotations)
    identity = identity[:, :1, :]
    bias = 0
    for i in range(n_joint):
        new_anim.parents.append(new_id[anim.parents[i]])
        new_names.append(names[i])
        new_anim.rotations.append(anim.rotations[:, [i], :])
        if i == target_idx[bias]:
            # halve the offset and splice in the zero-rotation split joint
            new_anim.offsets.append(anim.offsets[i] / 2)
            new_anim.parents.append(i + bias)
            new_names.append(names[i] + '_split')
            new_anim.offsets.append(anim.offsets[i] / 2)
            new_anim.rotations.append(identity)
            new_id[i] += 1
            bias += 1
        else:
            new_anim.offsets.append(anim.offsets[i])

    new_anim.offsets = np.array(new_anim.offsets)
    # spread the Spine1 + next-joint offset evenly over the three spine
    # joints created by the split (assumes they are consecutive — confirm)
    offset_spine = anim.offsets[target_idx[0]] + anim.offsets[target_idx[0] + 1]
    new_anim.offsets[target_idx[0]:target_idx[0]+3, :] = offset_spine / 3
    new_anim.rotations = np.concatenate(new_anim.rotations, axis=1)

    try_mkdir(os.path.split(save_file)[0])
    BVH.save(save_file, new_anim, names=new_names, frametime=ftime, order='xyz')
def try_mkdir(path):
    """Create *path* (including parents) if it does not exist yet.

    Replaces the former ``os.system('mkdir -p {}')`` call, which was
    non-portable and unsafe for paths containing spaces or shell
    metacharacters.
    """
    import os
    os.makedirs(path, exist_ok=True)
class BVH_file:
    """Parsed BVH file with skeleton detection and simplification utilities.

    Loads a BVH via ``BVH.load``, matches its joint names against the known
    reference skeletons in ``corps_names``, and builds mappings between the
    full joint set and the simplified subset used by the models.
    """

    def __init__(self, file_path=None, args=None, dataset=None, new_root=None):
        if file_path is None:
            file_path = get_std_bvh(dataset=dataset)
        self.anim, self._names, self.frametime = BVH.load(file_path)
        if new_root is not None:
            self.set_new_root(new_root)
        self.skeleton_type = -1
        self.edges = []
        self.edge_mat = []
        self.edge_num = 0
        self._topology = None
        self.ee_length = []

        # strip namespace prefixes, e.g. "mixamorig:Hips" -> "Hips"
        for i, name in enumerate(self._names):
            if ':' in name:
                name = name[name.find(':') + 1:]
                self._names[i] = name

        # full_fill[i] == 1 iff every joint of reference skeleton i is present
        full_fill = [1] * len(corps_names)
        for i, ref_names in enumerate(corps_names):
            for ref_name in ref_names:
                if ref_name not in self._names:
                    full_fill[i] = 0
                    break

        if full_fill[3]:
            self.skeleton_type = 3
        else:
            for i, _ in enumerate(full_fill):
                if full_fill[i]:
                    self.skeleton_type = i
                    break

            if self.skeleton_type == 2 and full_fill[4]:
                self.skeleton_type = 4

        # marker joints override the generic match above
        if 'Neck1' in self._names:
            self.skeleton_type = 5
        if 'Left_End' in self._names:
            self.skeleton_type = 6
        if 'Three_Arms_Hips' in self._names:
            self.skeleton_type = 7
        if 'Three_Arms_Hips_split' in self._names:
            self.skeleton_type = 8

        if 'LHipJoint' in self._names:
            self.skeleton_type = 3

        if 'HipsPrisoner' in self._names:
            self.skeleton_type = 9

        if 'Spine1_split' in self._names:
            self.skeleton_type = 10

        """
        4.
        Here, you need to assign self.skeleton_type the corresponding index of your own dataset in corps_names or ee_names list.
        You can use self._names, which contains the joints name in original bvh file, to write your own if statement.
        """
        # if ...:
        #     self.skeleton_type = 11

        if self.skeleton_type == -1:
            print(self._names)
            raise Exception('Unknown skeleton')

        if self.skeleton_type == 0:
            self.set_new_root(1)

        self.details = [i for i, name in enumerate(self._names) if name not in corps_names[self.skeleton_type]]
        self.joint_num = self.anim.shape[1]
        self.corps = []
        self.simplified_name = []
        self.simplify_map = {}
        self.inverse_simplify_map = {}

        # corps: indices (in file order of the reference list) of the
        # simplified-skeleton joints within the full joint set
        for name in corps_names[self.skeleton_type]:
            for j in range(self.anim.shape[1]):
                if name == self._names[j]:
                    self.corps.append(j)
                    break

        if len(self.corps) != len(corps_names[self.skeleton_type]):
            for i in self.corps: print(self._names[i], end=' ')
            print(self.corps, self.skeleton_type, len(self.corps), sep='\n')
            raise Exception('Problem in file', file_path)

        self.ee_id = []
        for i in ee_names[self.skeleton_type]:
            self.ee_id.append(corps_names[self.skeleton_type].index(i))

        self.joint_num_simplify = len(self.corps)
        for i, j in enumerate(self.corps):
            self.simplify_map[j] = i
            self.inverse_simplify_map[i] = j
            self.simplified_name.append(self._names[j])
        self.inverse_simplify_map[0] = -1
        for i in range(self.anim.shape[1]):
            if i in self.details:
                self.simplify_map[i] = -1

        self.edges = build_edge_topology(self.topology, self.offset)

    def scale(self, alpha):
        """Uniformly scale offsets and root trajectory about the first frame."""
        self.anim.offsets *= alpha
        global_position = self.anim.positions[:, 0, :]
        global_position[1:, :] *= alpha
        global_position[1:, :] += (1 - alpha) * global_position[0, :]

    def rotate(self, theta, axis):
        """Rotate the whole motion by angle *theta* around *axis* (root only)."""
        q = Quaternions(np.hstack((np.cos(theta/2), np.sin(theta/2) * axis)))
        position = self.anim.positions[:, 0, :].copy()
        rotation = self.anim.rotations[:, 0, :]
        # work on per-frame deltas so the trajectory rotates about frame 0
        position[1:, ...] -= position[0:-1, ...]
        q_position = Quaternions(np.hstack((np.zeros((position.shape[0], 1)), position)))
        q_rotation = Quaternions.from_euler(np.radians(rotation))
        q_rotation = q * q_rotation
        q_position = q * q_position * (-q)
        self.anim.rotations[:, 0, :] = np.degrees(q_rotation.euler())
        position = q_position.imaginaries
        for i in range(1, position.shape[0]):
            position[i] += position[i-1]
        self.anim.positions[:, 0, :] = position

    # NOTE(review): topology/offset/names are read as plain attributes
    # throughout this class (e.g. get_height indexes self.offset), so they
    # must be properties; the decorators were missing in this copy.
    @property
    def topology(self):
        """Parent indices of the simplified skeleton, cached as a tuple."""
        if self._topology is None:
            self._topology = self.anim.parents[self.corps].copy()
            for i in range(self._topology.shape[0]):
                if i >= 1: self._topology[i] = self.simplify_map[self._topology[i]]
            self._topology = tuple(self._topology)
        return self._topology

    def get_ee_id(self):
        return self.ee_id

    def to_numpy(self, quater=False, edge=True):
        """Return [T, rot_channels + 3] motion; rotations then root position."""
        rotations = self.anim.rotations[:, self.corps, :]
        if quater:
            rotations = Quaternions.from_euler(np.radians(rotations)).qs
            positions = self.anim.positions[:, 0, :]
        else:
            positions = self.anim.positions[:, 0, :]

        if edge:
            # keep one rotation per edge (each edge's child joint)
            index = []
            for e in self.edges:
                index.append(e[0])
            rotations = rotations[:, index, :]

        rotations = rotations.reshape(rotations.shape[0], -1)

        return np.concatenate((rotations, positions), axis=1)

    def to_tensor(self, quater=False, edge=True):
        """Like :meth:`to_numpy`, but as a float tensor of shape [C, T]."""
        res = self.to_numpy(quater, edge)
        res = torch.tensor(res, dtype=torch.float)
        res = res.permute(1, 0)
        res = res.reshape((-1, res.shape[-1]))
        return res

    def get_position(self):
        """Stored per-joint positions restricted to the simplified skeleton."""
        positions = self.anim.positions
        positions = positions[:, self.corps, :]
        return positions

    @property
    def offset(self):
        return self.anim.offsets[self.corps]

    @property
    def names(self):
        return self.simplified_name

    def get_height(self):
        """Skeleton height: leg chain plus head chain lengths from the root."""
        offset = self.offset
        topo = self.topology

        res = 0
        p = self.ee_id[0]
        while p != 0:
            res += np.dot(offset[p], offset[p]) ** 0.5
            p = topo[p]

        p = self.ee_id[2]
        while p != 0:
            res += np.dot(offset[p], offset[p]) ** 0.5
            p = topo[p]

        return res

    def write(self, file_path):
        """Write the simplified skeleton's motion to *file_path* at 30 fps."""
        motion = self.to_numpy(quater=False, edge=False)
        rotations = motion[..., :-3].reshape(motion.shape[0], -1, 3)
        positions = motion[..., -3:]
        write_bvh(self.topology, self.offset, rotations, positions, self.names, 1.0/30, 'xyz', file_path)

    def get_ee_length(self):
        """Chain length of each end effector, normalized per limb group."""
        if len(self.ee_length): return self.ee_length
        degree = [0] * len(self.topology)
        for i in self.topology:
            if i < 0: continue
            degree[i] += 1

        for j in self.ee_id:
            length = 0
            # walk up the chain while joints have a single child
            while degree[j] <= 1:
                t = self.offset[j]
                length += np.dot(t, t) ** 0.5
                j = self.topology[j]

            self.ee_length.append(length)

        height = self.get_height()
        ee_group = [[0, 1], [2], [3, 4]]
        for group in ee_group:
            maxv = 0
            for j in group:
                maxv = max(maxv, self.ee_length[j])
            for j in group:
                self.ee_length[j] *= height / maxv

        return self.ee_length

    def set_new_root(self, new_root):
        """Re-root the skeleton at joint *new_root*, relabelling all joints."""
        euler = torch.tensor(self.anim.rotations[:, 0, :], dtype=torch.float)
        transform = ForwardKinematics.transform_from_euler(euler, 'xyz')
        offset = torch.tensor(self.anim.offsets[new_root], dtype=torch.float)
        new_pos = torch.matmul(transform, offset)
        new_pos = new_pos.numpy() + self.anim.positions[:, 0, :]
        self.anim.offsets[0] = -self.anim.offsets[new_root]
        self.anim.offsets[new_root] = np.zeros((3, ))
        self.anim.positions[:, new_root, :] = new_pos
        rot0 = Quaternions.from_euler(np.radians(self.anim.rotations[:, 0, :]), order='xyz')
        rot1 = Quaternions.from_euler(np.radians(self.anim.rotations[:, new_root, :]), order='xyz')
        new_rot1 = rot0 * rot1
        new_rot0 = (-rot1)
        new_rot0 = np.degrees(new_rot0.euler())
        new_rot1 = np.degrees(new_rot1.euler())
        self.anim.rotations[:, 0, :] = new_rot0
        self.anim.rotations[:, new_root, :] = new_rot1

        new_seq = []
        vis = [0] * self.anim.rotations.shape[1]
        new_idx = [-1] * len(vis)
        new_parent = [0] * len(vis)

        def relabel(x):
            # depth-first renumbering over the undirected parent graph
            nonlocal new_seq, vis, new_idx, new_parent
            new_idx[x] = len(new_seq)
            new_seq.append(x)
            vis[x] = 1
            for y in range(len(vis)):
                if not vis[y] and (self.anim.parents[x] == y or self.anim.parents[y] == x):
                    relabel(y)
                    new_parent[new_idx[y]] = new_idx[x]

        relabel(new_root)
        self.anim.rotations = self.anim.rotations[:, new_seq, :]
        self.anim.offsets = self.anim.offsets[new_seq]
        names = self._names.copy()
        for i, j in enumerate(new_seq):
            self._names[i] = names[j]
        # np.int was removed in NumPy >= 1.24; plain int is the documented
        # replacement and behaves identically here
        self.anim.parents = np.array(new_parent, dtype=int)
def batch_split(source, dest):
    """Apply :func:`split_joint` to every BVH file in *source*, writing to *dest*.

    Only directories whose first BVH parses as skeleton type 1 are
    processed; anything else is skipped silently (kept as best-effort).
    """
    files = [f for f in os.listdir(source) if f.endswith('.bvh')]
    try:
        bvh_file = BVH_file(os.path.join(source, files[0]))
        if bvh_file.skeleton_type != 1: return
    except Exception:
        # narrow from bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed; parse failures still mean "skip this dir"
        return
    print("Working on {}".format(os.path.split(source)[-1]))
    try_mkdir(dest)
    files = [f for f in os.listdir(source) if f.endswith('.bvh')]
    for i, file in tqdm(enumerate(files), total=len(files)):
        in_file = os.path.join(source, file)
        out_file = os.path.join(dest, file)
        split_joint(in_file, out_file)
16,063 | import os
import numpy as np
import copy
from datasets.bvh_parser import BVH_file
from datasets.motion_dataset import MotionData
from option_parser import get_args, try_mkdir
class BVH_file:
def __init__(self, file_path=None, args=None, dataset=None, new_root=None):
if file_path is None:
file_path = get_std_bvh(dataset=dataset)
self.anim, self._names, self.frametime = BVH.load(file_path)
if new_root is not None:
self.set_new_root(new_root)
self.skeleton_type = -1
self.edges = []
self.edge_mat = []
self.edge_num = 0
self._topology = None
self.ee_length = []
for i, name in enumerate(self._names):
if ':' in name:
name = name[name.find(':') + 1:]
self._names[i] = name
full_fill = [1] * len(corps_names)
for i, ref_names in enumerate(corps_names):
for ref_name in ref_names:
if ref_name not in self._names:
full_fill[i] = 0
break
if full_fill[3]:
self.skeleton_type = 3
else:
for i, _ in enumerate(full_fill):
if full_fill[i]:
self.skeleton_type = i
break
if self.skeleton_type == 2 and full_fill[4]:
self.skeleton_type = 4
if 'Neck1' in self._names:
self.skeleton_type = 5
if 'Left_End' in self._names:
self.skeleton_type = 6
if 'Three_Arms_Hips' in self._names:
self.skeleton_type = 7
if 'Three_Arms_Hips_split' in self._names:
self.skeleton_type = 8
if 'LHipJoint' in self._names:
self.skeleton_type = 3
if 'HipsPrisoner' in self._names:
self.skeleton_type = 9
if 'Spine1_split' in self._names:
self.skeleton_type = 10
"""
4.
Here, you need to assign self.skeleton_type the corresponding index of your own dataset in corps_names or ee_names list.
You can use self._names, which contains the joints name in original bvh file, to write your own if statement.
"""
# if ...:
# self.skeleton_type = 11
if self.skeleton_type == -1:
print(self._names)
raise Exception('Unknown skeleton')
if self.skeleton_type == 0:
self.set_new_root(1)
self.details = [i for i, name in enumerate(self._names) if name not in corps_names[self.skeleton_type]]
self.joint_num = self.anim.shape[1]
self.corps = []
self.simplified_name = []
self.simplify_map = {}
self.inverse_simplify_map = {}
for name in corps_names[self.skeleton_type]:
for j in range(self.anim.shape[1]):
if name == self._names[j]:
self.corps.append(j)
break
if len(self.corps) != len(corps_names[self.skeleton_type]):
for i in self.corps: print(self._names[i], end=' ')
print(self.corps, self.skeleton_type, len(self.corps), sep='\n')
raise Exception('Problem in file', file_path)
self.ee_id = []
for i in ee_names[self.skeleton_type]:
self.ee_id.append(corps_names[self.skeleton_type].index(i))
self.joint_num_simplify = len(self.corps)
for i, j in enumerate(self.corps):
self.simplify_map[j] = i
self.inverse_simplify_map[i] = j
self.simplified_name.append(self._names[j])
self.inverse_simplify_map[0] = -1
for i in range(self.anim.shape[1]):
if i in self.details:
self.simplify_map[i] = -1
self.edges = build_edge_topology(self.topology, self.offset)
def scale(self, alpha):
self.anim.offsets *= alpha
global_position = self.anim.positions[:, 0, :]
global_position[1:, :] *= alpha
global_position[1:, :] += (1 - alpha) * global_position[0, :]
def rotate(self, theta, axis):
q = Quaternions(np.hstack((np.cos(theta/2), np.sin(theta/2) * axis)))
position = self.anim.positions[:, 0, :].copy()
rotation = self.anim.rotations[:, 0, :]
position[1:, ...] -= position[0:-1, ...]
q_position = Quaternions(np.hstack((np.zeros((position.shape[0], 1)), position)))
q_rotation = Quaternions.from_euler(np.radians(rotation))
q_rotation = q * q_rotation
q_position = q * q_position * (-q)
self.anim.rotations[:, 0, :] = np.degrees(q_rotation.euler())
position = q_position.imaginaries
for i in range(1, position.shape[0]):
position[i] += position[i-1]
self.anim.positions[:, 0, :] = position
def topology(self):
if self._topology is None:
self._topology = self.anim.parents[self.corps].copy()
for i in range(self._topology.shape[0]):
if i >= 1: self._topology[i] = self.simplify_map[self._topology[i]]
self._topology = tuple(self._topology)
return self._topology
def get_ee_id(self):
    """Return the end-effector indices within the simplified skeleton."""
    return self.ee_id
def to_numpy(self, quater=False, edge=True):
    """Flatten the motion into a (frames, channels) array.

    Rotation channels come first (Euler degrees, or quaternions when
    ``quater`` is True), followed by the 3-D root position. With
    ``edge``, rotations are re-indexed by each edge's parent joint.
    """
    rotations = self.anim.rotations[:, self.corps, :]
    if quater:
        rotations = Quaternions.from_euler(np.radians(rotations)).qs
        positions = self.anim.positions[:, 0, :]
    else:
        positions = self.anim.positions[:, 0, :]
    if edge:
        index = []
        for e in self.edges:
            index.append(e[0])
        rotations = rotations[:, index, :]
    rotations = rotations.reshape(rotations.shape[0], -1)
    return np.concatenate((rotations, positions), axis=1)
def to_tensor(self, quater=False, edge=True):
    """Return to_numpy()'s result as a float tensor of shape (channels, frames)."""
    res = self.to_numpy(quater, edge)
    res = torch.tensor(res, dtype=torch.float)
    res = res.permute(1, 0)
    res = res.reshape((-1, res.shape[-1]))
    return res
def get_position(self):
    """Return the per-frame position channels restricted to the kept joints."""
    positions = self.anim.positions
    positions = positions[:, self.corps, :]
    return positions
def offset(self):
    """Offsets of the kept joints.

    NOTE(review): used as an attribute elsewhere (``self.offset[p]``) —
    likely a @property originally; decorator lost in extraction.
    """
    return self.anim.offsets[self.corps]
def names(self):
    """Names of the kept joints (used as an attribute elsewhere — likely a @property originally)."""
    return self.simplified_name
def get_height(self):
    """Approximate character height.

    Sums bone lengths along two chains up to the root: from ee_id[0]
    and from ee_id[2] (presumably a foot and the head chain — TODO
    confirm end-effector ordering).
    """
    offset = self.offset
    topo = self.topology

    res = 0
    p = self.ee_id[0]
    while p != 0:
        res += np.dot(offset[p], offset[p]) ** 0.5
        p = topo[p]

    p = self.ee_id[2]
    while p != 0:
        res += np.dot(offset[p], offset[p]) ** 0.5
        p = topo[p]

    return res
def write(self, file_path):
    """Export the simplified skeleton and motion as a 30 fps BVH file."""
    motion = self.to_numpy(quater=False, edge=False)
    rotations = motion[..., :-3].reshape(motion.shape[0], -1, 3)
    positions = motion[..., -3:]
    write_bvh(self.topology, self.offset, rotations, positions, self.names, 1.0/30, 'xyz', file_path)
def get_ee_length(self):
    """Chain length from each end-effector up to the first branching joint.

    Lengths are rescaled per limb group relative to the longest member
    and the character height; the result is cached in self.ee_length.
    """
    if len(self.ee_length): return self.ee_length
    degree = [0] * len(self.topology)
    for i in self.topology:
        if i < 0: continue
        degree[i] += 1

    for j in self.ee_id:
        length = 0
        # Walk up while the chain is unbranched (joint has at most one child).
        while degree[j] <= 1:
            t = self.offset[j]
            length += np.dot(t, t) ** 0.5
            j = self.topology[j]
        self.ee_length.append(length)

    height = self.get_height()
    ee_group = [[0, 1], [2], [3, 4]]  # presumably legs / head / arms — TODO confirm
    for group in ee_group:
        maxv = 0
        for j in group:
            maxv = max(maxv, self.ee_length[j])
        for j in group:
            self.ee_length[j] *= height / maxv

    return self.ee_length
def set_new_root(self, new_root):
    """Re-root the skeleton at joint *new_root*.

    Recomputes the root offset/position, composes the rotations of the
    old and new roots, then relabels joints by a DFS from the new root
    so parents always precede children in the new ordering.
    """
    euler = torch.tensor(self.anim.rotations[:, 0, :], dtype=torch.float)
    transform = ForwardKinematics.transform_from_euler(euler, 'xyz')
    offset = torch.tensor(self.anim.offsets[new_root], dtype=torch.float)
    new_pos = torch.matmul(transform, offset)
    new_pos = new_pos.numpy() + self.anim.positions[:, 0, :]
    self.anim.offsets[0] = -self.anim.offsets[new_root]
    self.anim.offsets[new_root] = np.zeros((3, ))
    self.anim.positions[:, new_root, :] = new_pos
    rot0 = Quaternions.from_euler(np.radians(self.anim.rotations[:, 0, :]), order='xyz')
    rot1 = Quaternions.from_euler(np.radians(self.anim.rotations[:, new_root, :]), order='xyz')
    new_rot1 = rot0 * rot1
    new_rot0 = (-rot1)  # conjugate == inverse for unit quaternions
    new_rot0 = np.degrees(new_rot0.euler())
    new_rot1 = np.degrees(new_rot1.euler())
    self.anim.rotations[:, 0, :] = new_rot0
    self.anim.rotations[:, new_root, :] = new_rot1

    new_seq = []
    vis = [0] * self.anim.rotations.shape[1]
    new_idx = [-1] * len(vis)
    new_parent = [0] * len(vis)

    def relabel(x):
        # DFS over the undirected joint graph, recording the visiting
        # order (new_seq) and the re-indexed parent array.
        nonlocal new_seq, vis, new_idx, new_parent
        new_idx[x] = len(new_seq)
        new_seq.append(x)
        vis[x] = 1
        for y in range(len(vis)):
            if not vis[y] and (self.anim.parents[x] == y or self.anim.parents[y] == x):
                relabel(y)
                new_parent[new_idx[y]] = new_idx[x]

    relabel(new_root)
    self.anim.rotations = self.anim.rotations[:, new_seq, :]
    self.anim.offsets = self.anim.offsets[new_seq]
    names = self._names.copy()
    for i, j in enumerate(new_seq):
        self._names[i] = names[j]
    # Fix: np.int was removed in NumPy 1.24; builtin int is the
    # documented replacement (np.int was a plain alias of it).
    self.anim.parents = np.array(new_parent, dtype=int)
def collect_bvh(data_path, character, files):
    """Convert every existing BVH of *character* under *data_path* to a
    (frames, channels) array and save the list of arrays to
    '<data_path><character>.npy'."""
    print('begin {}'.format(character))
    motions = []

    for i, motion in enumerate(files):
        # Silently skip files that were listed but are missing on disk.
        if not os.path.exists(data_path + character + '/' + motion):
            continue
        file = BVH_file(data_path + character + '/' + motion)
        new_motion = file.to_tensor().permute((1, 0)).numpy()
        motions.append(new_motion)

    save_file = data_path + character + '.npy'

    # NOTE(review): motions is a ragged list of 2-D arrays; recent NumPy
    # requires dtype=object / allow_pickle for such saves — confirm the
    # pinned NumPy version.
    np.save(save_file, motions)
    print('Npy file saved at {}'.format(save_file))
16,064 | import os
import numpy as np
import copy
from datasets.bvh_parser import BVH_file
from datasets.motion_dataset import MotionData
from option_parser import get_args, try_mkdir
class MotionData(Dataset):
    """Clip long motion clips into fixed-length windows for batched training.

    Each item is a 2-D tensor of shape (joint_num * 3, time) after the
    permute in __init__ (channels-first).
    """
    def __init__(self, args):
        super(MotionData, self).__init__()
        name = args.dataset
        file_path = './datasets/Mixamo/{}.npy'.format(name)

        if args.debug:
            # Debug datasets are stored alongside with a '_debug' suffix.
            file_path = file_path[:-4] + '_debug' + file_path[-4:]

        print('load from file {}'.format(file_path))
        self.total_frame = 0
        self.std_bvh = get_std_bvh(args)
        self.args = args
        self.data = []
        self.motion_length = []
        motions = np.load(file_path, allow_pickle=True)
        motions = list(motions)
        new_windows = self.get_windows(motions)
        self.data.append(new_windows)
        self.data = torch.cat(self.data)
        # (N, T, C) -> (N, C, T): channels-first layout.
        self.data = self.data.permute(0, 2, 1)

        if args.normalization == 1:
            self.mean = torch.mean(self.data, (0, 2), keepdim=True)
            self.var = torch.var(self.data, (0, 2), keepdim=True)
            self.var = self.var ** (1/2)
            # Guard near-constant channels against division blow-up.
            idx = self.var < 1e-5
            self.var[idx] = 1
            self.data = (self.data - self.mean) / self.var
        else:
            # Identity normalization: zero mean, unit variance placeholders
            # so denormalize() stays a no-op.
            self.mean = torch.mean(self.data, (0, 2), keepdim=True)
            self.mean.zero_()
            self.var = torch.ones_like(self.mean)

        # Hold out the last 5% of windows as a test split.
        train_len = self.data.shape[0] * 95 // 100
        self.test_set = self.data[train_len:, ...]
        self.data = self.data[:train_len, ...]
        # Time-reversed copies used for augmentation in __getitem__.
        self.data_reverse = torch.tensor(self.data.numpy()[..., ::-1].copy())

        self.reset_length_flag = 0
        self.virtual_length = 0
        print('Window count: {}, total frame (without downsampling): {}'.format(len(self), self.total_frame))

    def reset_length(self, length):
        # Pretend the dataset has `length` items (indices wrap around).
        self.reset_length_flag = 1
        self.virtual_length = length

    def __len__(self):
        if self.reset_length_flag:
            return self.virtual_length
        else:
            return self.data.shape[0]

    def __getitem__(self, item):
        if isinstance(item, int): item %= self.data.shape[0]
        # With augmentation enabled, return the time-reversed clip half the time.
        if self.args.data_augment == 0 or np.random.randint(0, 2) == 0:
            return self.data[item]
        else:
            return self.data_reverse[item]

    def get_windows(self, motions):
        """Slice each motion into 50%-overlapping windows of args.window_size frames."""
        new_windows = []

        for motion in motions:
            self.total_frame += motion.shape[0]
            motion = self.subsample(motion)
            self.motion_length.append(motion.shape[0])
            step_size = self.args.window_size // 2
            window_size = step_size * 2
            n_window = motion.shape[0] // step_size - 1
            for i in range(n_window):
                begin = i * step_size
                end = begin + window_size

                new = motion[begin:end, :]
                if self.args.rotation == 'quaternion':
                    new = new.reshape(new.shape[0], -1, 3)
                    rotations = new[:, :-1, :]
                    rotations = Quaternions.from_euler(np.radians(rotations)).qs
                    rotations = rotations.reshape(rotations.shape[0], -1)
                    # NOTE(review): both `positions` assignments below are never
                    # read afterwards — looks like dead/garbled code; verify
                    # against upstream before cleaning up.
                    positions = new[:, -1, :]
                    positions = np.concatenate((new, np.zeros((new.shape[0], new.shape[1], 1))), axis=2)
                    new = np.concatenate((rotations, new[:, -1, :].reshape(new.shape[0], -1)), axis=1)

                new = new[np.newaxis, ...]

                new_window = torch.tensor(new, dtype=torch.float32)
                new_windows.append(new_window)

        return torch.cat(new_windows)

    def subsample(self, motion):
        # Temporal downsampling by a factor of 2 (keep every other frame).
        return motion[::2, :]

    def denormalize(self, motion):
        """Map normalized motion back to the original scale (moving the
        cached statistics to motion's device when needed)."""
        if self.args.normalization:
            if self.var.device != motion.device:
                self.var = self.var.to(motion.device)
                self.mean = self.mean.to(motion.device)
            ans = motion * self.var + self.mean
        else: ans = motion
        return ans
def get_args():
    """Parse the command-line options with the project's argument parser."""
    return get_parser().parse_args()
def write_statistics(character, path):
    """Compute per-channel mean/std of *character*'s dataset (without
    augmentation) and save them as '<character>_mean.npy' / '_var.npy'
    under *path*."""
    args = get_args()
    new_args = copy.copy(args)
    new_args.data_augment = 0  # statistics must come from raw, un-mirrored data
    new_args.dataset = character

    dataset = MotionData(new_args)

    mean = dataset.mean
    var = dataset.var
    # Drop the leading batch dimension kept by MotionData (keepdim=True).
    mean = mean.cpu().numpy()[0, ...]
    var = var.cpu().numpy()[0, ...]

    np.save(path + '{}_mean.npy'.format(character), mean)
    np.save(path + '{}_var.npy'.format(character), var)
16,065 | import os
import numpy as np
import copy
from datasets.bvh_parser import BVH_file
from datasets.motion_dataset import MotionData
from option_parser import get_args, try_mkdir
The provided code snippet includes necessary dependencies for implementing the `copy_std_bvh` function. Write a Python function `def copy_std_bvh(data_path, character, files)` to solve the following problem:
copy an arbitrary bvh file as a static information (skeleton's offset) reference
Here is the function:
def copy_std_bvh(data_path, character, files):
    """
    copy an arbitrary bvh file as a static information (skeleton's offset) reference
    """
    import shutil
    # shutil.copy instead of os.system('cp ...'): portable (works on
    # Windows too) and immune to shell quoting/injection issues when the
    # path contains special characters.
    shutil.copy(data_path + character + '/' + files[0],
                './datasets/Mixamo/std_bvhs/{}.bvh'.format(character))
16,066 | import sys
import numpy as np
from Quaternions import Quaternions
from models.skeleton import build_joint_topology
def write_bvh(parent, offset, rotation, position, names, frametime, order, path, endsite=None):
    """Serialize a skeleton and its motion to a BVH file at *path*.

    Args:
        parent:    parent joint index per joint (root's own entry unused).
        offset:    per-joint rest offsets, indexable as offset[j][0..2].
        rotation:  (frames, joints, 3) Euler angles in channel *order*.
        position:  (frames, 3) root translations.
        names:     joint names, one per joint.
        frametime: seconds per frame.
        order:     rotation channel order, e.g. 'xyz'.
        path:      output file path.
        endsite:   unused; kept for interface compatibility.

    Returns:
        The complete file contents as a string.
    """
    frame = rotation.shape[0]
    joint_num = rotation.shape[1]
    order = order.upper()

    file_string = 'HIERARCHY\n'

    def write_static(idx, prefix):
        # Recursively emit the HIERARCHY section for joint idx.
        nonlocal parent, offset, rotation, names, order, endsite, file_string
        if idx == 0:
            name_label = 'ROOT ' + names[idx]
            channel_label = 'CHANNELS 6 Xposition Yposition Zposition {}rotation {}rotation {}rotation'.format(*order)
        else:
            name_label = 'JOINT ' + names[idx]
            channel_label = 'CHANNELS 3 {}rotation {}rotation {}rotation'.format(*order)
        offset_label = 'OFFSET %.6f %.6f %.6f' % (offset[idx][0], offset[idx][1], offset[idx][2])

        file_string += prefix + name_label + '\n'
        file_string += prefix + '{\n'
        file_string += prefix + '\t' + offset_label + '\n'
        file_string += prefix + '\t' + channel_label + '\n'

        has_child = False
        for y in range(idx + 1, rotation.shape[1]):
            if parent[y] == idx:
                has_child = True
                write_static(y, prefix + '\t')
        if not has_child:
            # Leaf joint: emit a zero-length End Site.
            file_string += prefix + '\t' + 'End Site\n'
            file_string += prefix + '\t' + '{\n'
            file_string += prefix + '\t\t' + 'OFFSET 0 0 0\n'
            file_string += prefix + '\t' + '}\n'
        file_string += prefix + '}\n'

    write_static(0, '')

    file_string += 'MOTION\n' + 'Frames: {}\n'.format(frame) + 'Frame Time: %.8f\n' % frametime
    for i in range(frame):
        file_string += '%.6f %.6f %.6f ' % (position[i][0], position[i][1], position[i][2])
        for j in range(joint_num):
            file_string += '%.6f %.6f %.6f ' % (rotation[i][j][0], rotation[i][j][1], rotation[i][j][2])
        file_string += '\n'

    # Fix: the original opened the file up front and never closed it;
    # a with-block guarantees the handle is closed.
    with open(path, 'w') as file:
        file.write(file_string)
    return file_string
16,067 | from torch import optim
from torch import nn
import torch
import random
from torch.optim import lr_scheduler
The provided code snippet includes necessary dependencies for implementing the `get_scheduler` function. Write a Python function `def get_scheduler(optimizer, opt)` to solve the following problem:
Return a learning rate scheduler Parameters: optimizer -- the optimizer of the network opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions. opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs. For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers. See https://pytorch.org/docs/stable/optim.html for more details.
Here is the function:
def get_scheduler(optimizer, opt):
    """Return a learning rate scheduler.

    Parameters:
        optimizer          -- the optimizer of the network
        opt (option class) -- stores all the experiment flags; needs to be a
                              subclass of BaseOptions. opt.lr_policy is the name
                              of the policy: linear | step | plateau | cosine

    For 'linear', we keep the same learning rate for the first <opt.n_epochs>
    epochs and linearly decay the rate to zero over the next
    <opt.n_epochs_decay> epochs. For other schedulers (step, plateau, and
    cosine), we use the default PyTorch schedulers.
    See https://pytorch.org/docs/stable/optim.html for more details.

    Raises:
        NotImplementedError: if opt.lr_policy is unknown.
    """
    if opt.lr_policy == 'linear':
        def lambda_rule(epoch):
            # 1.0 for the first n_epochs, then linear decay to 0 over n_epochs_decay.
            lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
    else:
        # Fix: the original *returned* the exception instance (with an
        # unformatted message) instead of raising it.
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler
16,068 | from torch import optim
from torch import nn
import torch
import random
from torch.optim import lr_scheduler
def get_ee(pos, pa, ees, velo=False, from_root=False):
    """Accumulate joint offsets along the kinematic chain given by the
    parent array *pa*, then pick out the end-effector joints *ees*.

    With ``velo`` the result is the frame-to-frame difference scaled by
    10; with ``from_root`` children of the root are accumulated too.
    The input tensor is not modified (a clone is used).
    """
    pos = pos.clone()
    for joint, par in enumerate(pa):
        if joint == 0:
            continue
        if par == 0 and not from_root:
            continue
        # Parents come earlier in pa, so par already holds its accumulated value.
        pos[:, :, joint, :] = pos[:, :, joint, :] + pos[:, :, par, :]
    ee_pos = pos[:, :, ees, :]
    if velo:
        ee_pos = ee_pos[:, 1:, ...] - ee_pos[:, :-1, ...]
        ee_pos = ee_pos * 10
    return ee_pos
16,069 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
def dfs(x, fa, vis, dist):
    """Depth-first walk over the joint graph encoded by parent array *fa*.

    Fills *dist* with hop counts from the start node and marks visited
    nodes in *vis* (both mutated in place).
    """
    vis[x] = 1
    for nxt in range(len(fa)):
        if vis[nxt]:
            continue
        # Adjacent iff one is the parent of the other.
        if fa[nxt] != x and fa[x] != nxt:
            continue
        dist[nxt] = dist[x] + 1
        dfs(nxt, fa, vis, dist)
16,070 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
def build_edge_topology(topology, offset):
    """Turn a parent-index list into (parent, child, child_offset) edges,
    one per non-root joint."""
    return [(topology[child], child, offset[child])
            for child in range(1, len(topology))]
16,071 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
def build_joint_topology(edges, origin_names):
    """Rebuild a joint hierarchy from an edge list, inserting zero-offset
    '<name>_virtual' joints at branch points.

    Returns (parent, offset, names, edge2joint) where edge2joint maps
    each produced joint after the root back to its source edge index
    (-1 for virtual joints).
    """
    parent = []
    offset = []
    names = []
    edge2joint = []
    joint_from_edge = []  # -1 means virtual joint
    joint_cnt = 0
    out_degree = [0] * (len(edges) + 10)
    for edge in edges:
        out_degree[edge[0]] += 1

    # add root joint
    joint_from_edge.append(-1)
    parent.append(0)
    offset.append(np.array([0, 0, 0]))
    names.append(origin_names[0])
    joint_cnt += 1

    def make_topology(edge_idx, pa):
        nonlocal edges, parent, offset, names, edge2joint, joint_from_edge, joint_cnt
        edge = edges[edge_idx]
        if out_degree[edge[0]] > 1:
            # Branch point: insert a zero-offset virtual joint first.
            parent.append(pa)
            offset.append(np.array([0, 0, 0]))
            names.append(origin_names[edge[1]] + '_virtual')
            edge2joint.append(-1)
            pa = joint_cnt
            joint_cnt += 1

        parent.append(pa)
        offset.append(edge[2])
        names.append(origin_names[edge[1]])
        edge2joint.append(edge_idx)
        pa = joint_cnt
        joint_cnt += 1

        # Recurse into edges leaving this edge's child joint.
        for idx, e in enumerate(edges):
            if e[0] == edge[1]:
                make_topology(idx, pa)

    # Start from every edge attached to the root (joint 0).
    for idx, e in enumerate(edges):
        if e[0] == 0:
            make_topology(idx, 0)

    return parent, offset, names, edge2joint
16,072 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
def calc_edge_mat(edges):
    """All-pairs hop distance between edges (Floyd–Warshall closure).

    Two edges are direct neighbours (distance 1) when they share a
    joint. Note: since every edge shares joints with itself, the
    neighbour pass also overwrites the diagonal with 1 — this matches
    the original behaviour and is preserved intentionally.
    """
    n = len(edges)
    dist = [[100000] * n for _ in range(n)]
    for i in range(n):
        dist[i][i] = 0

    # Mark joint-sharing pairs as direct neighbours (diagonal included).
    for i, a in enumerate(edges):
        for j, b in enumerate(edges):
            if a[0] in (b[0], b[1]) or a[1] in (b[0], b[1]):
                dist[i][j] = 1

    # Floyd–Warshall shortest-path closure.
    for k in range(n):
        for i in range(n):
            for j in range(n):
                if dist[i][k] + dist[k][j] < dist[i][j]:
                    dist[i][j] = dist[i][k] + dist[k][j]

    return dist
The provided code snippet includes necessary dependencies for implementing the `find_neighbor` function. Write a Python function `def find_neighbor(edges, d)` to solve the following problem:
Line #373 is buggy. Thanks @crissallan!! See issue #30 (https://github.com/DeepMotionEditing/deep-motion-editing/issues/30) However, fixing this bug will make it unable to load the pretrained model and affect the reproducibility of quantitative error reported in the paper. It is not a fatal bug so we didn't touch it and we are looking for possible solutions.
Here is the function:
def find_neighbor(edges, d):
    """Neighbourhood (within hop distance *d*) of every edge, plus a
    synthetic 'global' part appended at index len(edges).

    The loop that appends the global index reproduces a known bug kept
    for checkpoint compatibility — see issue #30 of deep-motion-editing.
    It must not be 'fixed' without retraining.
    """
    # --- inlined all-pairs edge distance (identical to calc_edge_mat) ---
    n = len(edges)
    mat = [[100000] * n for _ in range(n)]
    for i in range(n):
        mat[i][i] = 0
    for i, a in enumerate(edges):
        for j, b in enumerate(edges):
            if a[0] in (b[0], b[1]) or a[1] in (b[0], b[1]):
                mat[i][j] = 1
    for k in range(n):
        for i in range(n):
            for j in range(n):
                if mat[i][k] + mat[k][j] < mat[i][j]:
                    mat[i][j] = mat[i][k] + mat[k][j]

    # --- neighbourhoods within distance d ---
    neighbor_list = [[j for j in range(n) if mat[i][j] <= d] for i in range(n)]

    # add neighbor for global part
    global_part_neighbor = neighbor_list[0].copy()
    # Known bug (kept intentionally): the global index is appended only
    # to the neighbours of edge 0's neighbourhood members.
    for i in global_part_neighbor:
        neighbor_list[i].append(n)
    neighbor_list.append(global_part_neighbor)
    return neighbor_list
16,073 | import sys
import torch
from models.Kinematics import InverseKinematics
from datasets.bvh_parser import BVH_file
from tqdm import tqdm
import BVH as BVH
import Animation as Animation
from Quaternions_old import Quaternions
class BVH_file:
def __init__(self, file_path=None, args=None, dataset=None, new_root=None):
if file_path is None:
file_path = get_std_bvh(dataset=dataset)
self.anim, self._names, self.frametime = BVH.load(file_path)
if new_root is not None:
self.set_new_root(new_root)
self.skeleton_type = -1
self.edges = []
self.edge_mat = []
self.edge_num = 0
self._topology = None
self.ee_length = []
for i, name in enumerate(self._names):
if ':' in name:
name = name[name.find(':') + 1:]
self._names[i] = name
full_fill = [1] * len(corps_names)
for i, ref_names in enumerate(corps_names):
for ref_name in ref_names:
if ref_name not in self._names:
full_fill[i] = 0
break
if full_fill[3]:
self.skeleton_type = 3
else:
for i, _ in enumerate(full_fill):
if full_fill[i]:
self.skeleton_type = i
break
if self.skeleton_type == 2 and full_fill[4]:
self.skeleton_type = 4
if 'Neck1' in self._names:
self.skeleton_type = 5
if 'Left_End' in self._names:
self.skeleton_type = 6
if 'Three_Arms_Hips' in self._names:
self.skeleton_type = 7
if 'Three_Arms_Hips_split' in self._names:
self.skeleton_type = 8
if 'LHipJoint' in self._names:
self.skeleton_type = 3
if 'HipsPrisoner' in self._names:
self.skeleton_type = 9
if 'Spine1_split' in self._names:
self.skeleton_type = 10
"""
4.
Here, you need to assign self.skeleton_type the corresponding index of your own dataset in corps_names or ee_names list.
You can use self._names, which contains the joints name in original bvh file, to write your own if statement.
"""
# if ...:
# self.skeleton_type = 11
if self.skeleton_type == -1:
print(self._names)
raise Exception('Unknown skeleton')
if self.skeleton_type == 0:
self.set_new_root(1)
self.details = [i for i, name in enumerate(self._names) if name not in corps_names[self.skeleton_type]]
self.joint_num = self.anim.shape[1]
self.corps = []
self.simplified_name = []
self.simplify_map = {}
self.inverse_simplify_map = {}
for name in corps_names[self.skeleton_type]:
for j in range(self.anim.shape[1]):
if name == self._names[j]:
self.corps.append(j)
break
if len(self.corps) != len(corps_names[self.skeleton_type]):
for i in self.corps: print(self._names[i], end=' ')
print(self.corps, self.skeleton_type, len(self.corps), sep='\n')
raise Exception('Problem in file', file_path)
self.ee_id = []
for i in ee_names[self.skeleton_type]:
self.ee_id.append(corps_names[self.skeleton_type].index(i))
self.joint_num_simplify = len(self.corps)
for i, j in enumerate(self.corps):
self.simplify_map[j] = i
self.inverse_simplify_map[i] = j
self.simplified_name.append(self._names[j])
self.inverse_simplify_map[0] = -1
for i in range(self.anim.shape[1]):
if i in self.details:
self.simplify_map[i] = -1
self.edges = build_edge_topology(self.topology, self.offset)
def scale(self, alpha):
self.anim.offsets *= alpha
global_position = self.anim.positions[:, 0, :]
global_position[1:, :] *= alpha
global_position[1:, :] += (1 - alpha) * global_position[0, :]
def rotate(self, theta, axis):
q = Quaternions(np.hstack((np.cos(theta/2), np.sin(theta/2) * axis)))
position = self.anim.positions[:, 0, :].copy()
rotation = self.anim.rotations[:, 0, :]
position[1:, ...] -= position[0:-1, ...]
q_position = Quaternions(np.hstack((np.zeros((position.shape[0], 1)), position)))
q_rotation = Quaternions.from_euler(np.radians(rotation))
q_rotation = q * q_rotation
q_position = q * q_position * (-q)
self.anim.rotations[:, 0, :] = np.degrees(q_rotation.euler())
position = q_position.imaginaries
for i in range(1, position.shape[0]):
position[i] += position[i-1]
self.anim.positions[:, 0, :] = position
def topology(self):
if self._topology is None:
self._topology = self.anim.parents[self.corps].copy()
for i in range(self._topology.shape[0]):
if i >= 1: self._topology[i] = self.simplify_map[self._topology[i]]
self._topology = tuple(self._topology)
return self._topology
def get_ee_id(self):
return self.ee_id
def to_numpy(self, quater=False, edge=True):
rotations = self.anim.rotations[:, self.corps, :]
if quater:
rotations = Quaternions.from_euler(np.radians(rotations)).qs
positions = self.anim.positions[:, 0, :]
else:
positions = self.anim.positions[:, 0, :]
if edge:
index = []
for e in self.edges:
index.append(e[0])
rotations = rotations[:, index, :]
rotations = rotations.reshape(rotations.shape[0], -1)
return np.concatenate((rotations, positions), axis=1)
def to_tensor(self, quater=False, edge=True):
res = self.to_numpy(quater, edge)
res = torch.tensor(res, dtype=torch.float)
res = res.permute(1, 0)
res = res.reshape((-1, res.shape[-1]))
return res
def get_position(self):
positions = self.anim.positions
positions = positions[:, self.corps, :]
return positions
def offset(self):
return self.anim.offsets[self.corps]
def names(self):
return self.simplified_name
def get_height(self):
offset = self.offset
topo = self.topology
res = 0
p = self.ee_id[0]
while p != 0:
res += np.dot(offset[p], offset[p]) ** 0.5
p = topo[p]
p = self.ee_id[2]
while p != 0:
res += np.dot(offset[p], offset[p]) ** 0.5
p = topo[p]
return res
def write(self, file_path):
motion = self.to_numpy(quater=False, edge=False)
rotations = motion[..., :-3].reshape(motion.shape[0], -1, 3)
positions = motion[..., -3:]
write_bvh(self.topology, self.offset, rotations, positions, self.names, 1.0/30, 'xyz', file_path)
def get_ee_length(self):
if len(self.ee_length): return self.ee_length
degree = [0] * len(self.topology)
for i in self.topology:
if i < 0: continue
degree[i] += 1
for j in self.ee_id:
length = 0
while degree[j] <= 1:
t = self.offset[j]
length += np.dot(t, t) ** 0.5
j = self.topology[j]
self.ee_length.append(length)
height = self.get_height()
ee_group = [[0, 1], [2], [3, 4]]
for group in ee_group:
maxv = 0
for j in group:
maxv = max(maxv, self.ee_length[j])
for j in group:
self.ee_length[j] *= height / maxv
return self.ee_length
def set_new_root(self, new_root):
    """Re-root the skeleton at joint *new_root*.

    Recomputes the root offset/position, composes the rotations of the
    old and new roots, then relabels joints by a DFS from the new root
    so parents always precede children in the new ordering.
    """
    euler = torch.tensor(self.anim.rotations[:, 0, :], dtype=torch.float)
    transform = ForwardKinematics.transform_from_euler(euler, 'xyz')
    offset = torch.tensor(self.anim.offsets[new_root], dtype=torch.float)
    new_pos = torch.matmul(transform, offset)
    new_pos = new_pos.numpy() + self.anim.positions[:, 0, :]
    self.anim.offsets[0] = -self.anim.offsets[new_root]
    self.anim.offsets[new_root] = np.zeros((3, ))
    self.anim.positions[:, new_root, :] = new_pos
    rot0 = Quaternions.from_euler(np.radians(self.anim.rotations[:, 0, :]), order='xyz')
    rot1 = Quaternions.from_euler(np.radians(self.anim.rotations[:, new_root, :]), order='xyz')
    new_rot1 = rot0 * rot1
    new_rot0 = (-rot1)  # conjugate == inverse for unit quaternions
    new_rot0 = np.degrees(new_rot0.euler())
    new_rot1 = np.degrees(new_rot1.euler())
    self.anim.rotations[:, 0, :] = new_rot0
    self.anim.rotations[:, new_root, :] = new_rot1

    new_seq = []
    vis = [0] * self.anim.rotations.shape[1]
    new_idx = [-1] * len(vis)
    new_parent = [0] * len(vis)

    def relabel(x):
        # DFS over the undirected joint graph, recording the visiting
        # order (new_seq) and the re-indexed parent array.
        nonlocal new_seq, vis, new_idx, new_parent
        new_idx[x] = len(new_seq)
        new_seq.append(x)
        vis[x] = 1
        for y in range(len(vis)):
            if not vis[y] and (self.anim.parents[x] == y or self.anim.parents[y] == x):
                relabel(y)
                new_parent[new_idx[y]] = new_idx[x]

    relabel(new_root)
    self.anim.rotations = self.anim.rotations[:, new_seq, :]
    self.anim.offsets = self.anim.offsets[new_seq]
    names = self._names.copy()
    for i, j in enumerate(new_seq):
        self._names[i] = names[j]
    # Fix: np.int was removed in NumPy 1.24; builtin int is the
    # documented replacement (np.int was a plain alias of it).
    self.anim.parents = np.array(new_parent, dtype=int)
def get_character_height(file_name):
    """Load *file_name* as a BVH_file and return its skeleton height."""
    return BVH_file(file_name).get_height()
16,074 | import bpy
def add_floor(size):
def add_camera(location, rotation):
def add_light(location):
def make_scene(floor_size=1000, camera_position=(37.54, -28.87, 16.34), camera_rotation=(1.30473, 0.0109881, 0.896417),
               light_position=(0, 0, 20)):
    """Build a minimal render scene (floor, camera, light), move the three
    objects into a new 'Scene' collection and return them as a list."""
    floor = add_floor(floor_size)
    camera = add_camera(camera_position, camera_rotation)
    light = add_light(light_position)
    # Select exactly the three new objects so move_to_collection grabs them.
    bpy.ops.object.select_all(action='DESELECT')
    floor.select_set(True)
    camera.select_set(True)
    light.select_set(True)
    bpy.ops.object.move_to_collection(collection_index=0, is_new=True, new_collection_name="Scene")
    bpy.ops.object.select_all(action='DESELECT')
    return [floor, camera, light]
16,075 | import bpy
def add_rendering_parameters(scene, args, camera):
    """Configure output resolution, frame range, camera, save path,
    render engine and container format on *scene*, then return it."""
    render = scene.render
    render.resolution_x = args.resX
    render.resolution_y = args.resY
    scene.frame_end = args.frame_end
    scene.camera = camera
    render.filepath = args.save_path

    engine = args.render_engine
    if engine == 'cycles':
        render.engine = 'CYCLES'
        scene.cycles.device = 'GPU'
    elif engine == 'eevee':
        render.engine = 'BLENDER_EEVEE'

    render.image_settings.file_format = 'AVI_JPEG'
    return scene
16,076 | import bpy
def add_material_for_character(objs):
    """Create one shared Principled-BSDF material and append it to every
    object in *objs*."""
    char_mat = bpy.data.materials.new(name="characterMaterial")
    char_mat.use_nodes = True
    bsdf = char_mat.node_tree.nodes["Principled BSDF"]
    bsdf.inputs[0].default_value = (0.021219, 0.278894, 1, 1)  # character material color (RGBA, blue-ish)
    for obj in objs:
        obj.data.materials.append(char_mat)
16,077 | import bpy
import sys
import numpy as np
import argparse
import os
def clean_scene():
    """Select and delete every object in the current Blender scene."""
    bpy.ops.object.select_all(action='SELECT')
    bpy.ops.object.delete()
16,078 | import bpy
import sys
import numpy as np
import argparse
import os
def load_fbx(source):
    """Import an FBX file without its animation (rest pose / mesh only)."""
    bpy.ops.import_scene.fbx(filepath=source, use_anim=False)
16,079 | import bpy
import sys
import numpy as np
import argparse
import os
def load_bvh(source):
    """Import *source* into the scene; return its base name without '.bvh'.

    Note: paths are assumed to use '/' separators (kept for behavioural
    parity with the original).
    """
    bpy.ops.import_anim.bvh(filepath=source)
    filename = source.split('/')[-1]
    return filename[:-4]
16,080 | import bpy
import sys
import numpy as np
import argparse
import os
The provided code snippet includes necessary dependencies for implementing the `set_rest_pose_bvh` function. Write a Python function `def set_rest_pose_bvh(filename, source_arm)` to solve the following problem:
This helps recover the rest-pose root position from the rest pose of the FBX reference file.
Here is the function:
def set_rest_pose_bvh(filename, source_arm):
    """
    This helps recover the rest pose position from the rest pose of fbx reference file

    Rewrites the root OFFSET line of *filename* (two lines below 'ROOT')
    with the head position of *source_arm*'s first bone and writes the
    result next to the input as '<name>_tmp.bvh', returning that path.

    Raises:
        Exception: if no ROOT declaration is found.
    """
    dest_filename = filename[:-4] + '_tmp.bvh'
    rest_loc = source_arm.data.bones[0].head_local

    # Fix: context managers close both handles (the original leaked them),
    # and the output file is only created after validation succeeds.
    with open(filename, 'r') as source_file:
        content = source_file.readlines()

    flag = 0
    for i in range(len(content)):
        if 'ROOT' in content[i]:
            # Two lines below 'ROOT <name>' (after the '{') sits the root OFFSET.
            content[i + 2] = '\tOFFSET %.6f %.6f %.6f\n' % (rest_loc[0], rest_loc[1], rest_loc[2])
            flag = 1
            break

    if flag == 0:
        raise Exception('Illegal bvh file')

    with open(dest_filename, 'w') as dest_file:
        dest_file.write(''.join(content))
    return dest_filename
16,081 | import bpy
import sys
import numpy as np
import argparse
import os
The provided code snippet includes necessary dependencies for implementing the `extract_weight` function. Write a Python function `def extract_weight(me)` to solve the following problem:
Extract skinning weight from a given mesh
Here is the function:
def extract_weight(me):
    """
    Extract skinning weight from a given mesh

    Returns:
        weight:     (num_vertices, num_groups) float array of weights.
        vgrp_label: the vertex-group names (me.vertex_groups.keys()).
        mask:       same-shape int array, 1 where a weight was present.
    """
    verts = me.data.vertices
    vgrps = me.vertex_groups

    weight = np.zeros((len(verts), len(vgrps)))
    # Fix: np.int was removed in NumPy 1.24; builtin int is equivalent.
    mask = np.zeros(weight.shape, dtype=int)
    vgrp_label = vgrps.keys()

    for i, vert in enumerate(verts):
        for g in vert.groups:
            j = g.group
            weight[i, j] = g.weight
            mask[i, j] = 1
    return weight, vgrp_label, mask
16,082 | import bpy
import sys
import numpy as np
import argparse
import os
def clean_vgrps(me):
def load_weight(me, label, weight):
    """Rebuild *me*'s vertex groups from (label, weight) and load the
    per-vertex weights.

    Existing groups are cleared via clean_vgrps first. Assumes *label*
    is an indexable sequence aligned with weight's columns — TODO
    confirm (extract_weight returns dict_keys, which is not indexable).
    """
    clean_vgrps(me)
    verts = me.data.vertices
    vgrps = me.vertex_groups

    for name in label:
        vgrps.new(name=name)

    for j in range(weight.shape[1]):
        idx = vgrps.find(label[j])
        if idx == -1:
            # Group name not present after creation; skip its column.
            continue
        for i in range(weight.shape[0]):
            vgrps[idx].add([i], weight[i, j], 'REPLACE')
16,083 | import bpy
import sys
import numpy as np
import argparse
import os
def set_modifier(me, arm):
    """Bind mesh *me* to armature *arm*.

    Reuses an existing ARMATURE modifier if present; otherwise creates
    one and configures it (vertex-group driven, volume-preserving).
    """
    modifiers = me.modifiers
    for modifier in modifiers:
        if modifier.type == 'ARMATURE':
            modifier.object = arm
            modifier.use_vertex_groups = True
            modifier.use_deform_preserve_volume = True
            return

    # Fix: use the modifier returned by new() — the original indexed
    # modifiers[0], which picks the wrong entry when other (non-armature)
    # modifiers already exist on the mesh.
    modifier = modifiers.new(name='Armature', type='ARMATURE')
    modifier.object = arm
    modifier.use_vertex_groups = True
    modifier.use_deform_preserve_volume = True
16,084 | import bpy
import sys
import numpy as np
import argparse
import os
The provided code snippet includes necessary dependencies for implementing the `adapt_weight` function. Write a Python function `def adapt_weight(source_weight, source_label, source_arm, dest_arm)` to solve the following problem:
The target armature may be a reduced one (e.g. without fingers), so the skinning weight of each missing bone is moved to its nearest remaining ancestor.
Here is the function:
def adapt_weight(source_weight, source_label, source_arm, dest_arm):
    """Remap per-vertex weights from *source_arm*'s bones onto *dest_arm*'s.

    The target armature may be a reduced one (e.g. without fingers): the
    weight of each bone missing from the target is added to its nearest
    ancestor that exists there.
    """
    weight = np.zeros((source_weight.shape[0], len(dest_arm.data.bones)))

    # Skinning weight is bound to bone names. A common 'ns:' prefix may
    # have been stripped from the retargeting output; restore it on the
    # destination names so lookups match.
    prefix = ''
    ref_name = source_arm.data.bones[0].name
    if ':' in ref_name and ':' not in dest_arm.data.bones[0].name:
        idx = ref_name.index(':')
        prefix = ref_name[:idx + 1]

    dest_name = [prefix + bone.name for bone in dest_arm.data.bones]

    for j, name in enumerate(source_label):
        bone = source_arm.data.bones.find(name)
        bone = source_arm.data.bones[bone]
        # Climb to the nearest ancestor present in the target armature.
        while bone.parent is not None and bone.name not in dest_name:
            bone = bone.parent
        idx = dest_name.index(bone.name)
        weight[:, idx] += source_weight[:, j]

    return weight
16,085 | import sys
import os
import BVH
import numpy as np
import bpy
import mathutils
import pdb
class BVH_file:
    """Thin BVH wrapper used on the Blender side.

    Loads a file, permutes coordinate axes from (x, y, z) to (z, x, y),
    and normalizes positions/offsets by character height (divided by the
    module-level `global_scale`, defined outside this chunk).
    """
    def __init__(self, file_path):
        self.anim, self.names, self.frametime = BVH.load(file_path)

        # permute (x, y, z) to (z, x, y)
        tmp = self.anim.offsets.copy()
        self.anim.offsets[..., 0] = tmp[..., 2]
        self.anim.offsets[..., 1] = tmp[..., 0]
        self.anim.offsets[..., 2] = tmp[..., 1]

        tmp = self.anim.positions.copy()
        self.anim.positions[..., 0] = tmp[..., 2]
        self.anim.positions[..., 1] = tmp[..., 0]
        self.anim.positions[..., 2] = tmp[..., 1]

        # Quaternion imaginary components are permuted the same way
        # (component 0 is left untouched).
        tmp = self.anim.rotations.qs.copy()
        self.anim.rotations.qs[..., 1] = tmp[..., 3]
        self.anim.rotations.qs[..., 2] = tmp[..., 1]
        self.anim.rotations.qs[..., 3] = tmp[..., 2]

        self.joint_num = self.anim.rotations.shape[1]
        self.frame_num = self.anim.rotations.shape[0]

        self.normalize()

    # NOTE(review): topology/offsets are accessed as attributes below
    # (self.topology[j], self.offsets[j]); upstream they are @property —
    # the decorators were likely lost in extraction. Verify.
    def topology(self):
        """Parent index per joint."""
        return self.anim.parents

    def offsets(self):
        """Per-joint offset vectors."""
        return self.anim.offsets

    # Normalize bone length by height and translate the (x, y) mean to (0, 0)
    def normalize(self):
        height = self.get_height() / global_scale
        self.anim.offsets /= height
        self.anim.positions /= height

        mean_position = np.mean(self.anim.positions[:, 0, :], axis=0)
        self.anim.positions[:, 0, 0] -= mean_position[0]
        self.anim.positions[:, 0, 1] -= mean_position[1]

    def get_height(self):
        """Vertical extent of the rest skeleton: max minus min of the last
        coordinate over all joints, accumulated by DFS from the root."""
        low = high = 0

        def dfs(i, pos):
            nonlocal low
            nonlocal high
            low = min(low, pos[-1])
            high = max(high, pos[-1])
            for j in range(self.joint_num):
                if self.topology[j] == i:
                    dfs(j, pos + self.offsets[j])

        dfs(0, np.array([0, 0, 0]))
        return high - low
def build_t_pose(file: BVH_file, joint, parent_obj, all_obj):
    """
    Recursively instantiate the rest-pose skeleton of *file* starting at
    *joint*, appending every created object to *all_obj*.
    """
    if joint == 0:
        # Root: a joint object at the origin, with no connecting bone.
        new_joint = add_joint(mathutils.Vector((0., 0., 0.)), None, file.names[joint] + '_end')
        all_obj.append(new_joint)
    else:
        offset = mathutils.Vector(file.offsets[joint])
        new_bone = add_bone(offset, parent_obj, file.names[joint] + '_bone')
        new_joint = add_joint(parent_obj.location + offset, new_bone, file.names[joint] + '_end')
        all_obj.append(new_bone)
        all_obj.append(new_joint)

    for child in range(len(file.topology)):
        if file.topology[child] == joint:
            build_t_pose(file, child, new_joint, all_obj)
def set_animation(file, joints):
    """
    Key every frame of the BVH clip onto the scene objects in *joints*:
    joints[0] receives root-location keys, every joint receives quaternion
    rotation keys, one keyframe per frame.
    """
    scene = bpy.context.scene
    scene.frame_start = 0
    scene.frame_end = file.anim.rotations.shape[0] - 1
    scene.render.fps = 1 / file.frametime
    bpy.ops.object.select_all(action='DESELECT')

    print('Set fps to', scene.render.fps)
    print(file.frame_num, 'frames in total')

    root = joints[0]
    for frame in range(file.frame_num):
        root.location = file.anim.positions[frame, 0, :]
        root.keyframe_insert(data_path='location', frame=frame)
        if frame % 100 == 99:
            print('[{}/{}] done.'.format(frame + 1, file.frame_num))
        for j in range(file.joint_num):
            obj = joints[j]
            obj.rotation_mode = 'QUATERNION'
            obj.rotation_quaternion = mathutils.Quaternion(file.anim.rotations.qs[frame, j, :])
            obj.keyframe_insert(data_path='rotation_quaternion', frame=frame)

    scene.frame_current = 0
def load_bvh(file_name):
    """
    Full import pipeline: parse a BVH file, build its T-pose objects, key
    every frame, and move the result into a "Character" collection.

    Returns the list of all created objects.
    """
    print('Loading BVH file......')
    file = BVH_file(file_name)
    print('Loading BVH file done.')

    print('Building T-Pose......')
    all_obj = []
    build_t_pose(file, 0, None, all_obj)
    print('Building T-Pose done.')

    print('Loading keyframes......')
    # Re-pair scene objects with the animation's joint order by name.
    all_joints = []
    for j in range(file.joint_num):
        target = file.names[j] + '_end'
        match = next((o for o in all_obj if o.name == target), None)
        if match is not None:
            all_joints.append(match)
    set_animation(file, all_joints)
    print('Loading keyframes done.')

    bpy.ops.object.select_all(action='DESELECT')
    for obj in all_obj:
        obj.select_set(True)
    bpy.ops.object.move_to_collection(collection_index=0, is_new=True, new_collection_name="Character")
    bpy.ops.object.select_all(action='DESELECT')

    print('Load bvh all done!')
    return all_obj
16,086 | import bpy
import numpy as np
from os import listdir, path
def fbx2bvh(data_path, file):
    """
    Import `<data_path>/<file>` (an FBX), export its last action as a BVH
    alongside it, then delete the imported action again.
    """
    sourcepath = data_path + "/" + file
    bvh_path = data_path + "/" + file.split(".fbx")[0] + ".bvh"

    bpy.ops.import_scene.fbx(filepath=sourcepath)

    action = bpy.data.actions[-1]
    frame_start = min(9999, action.frame_range[0])
    frame_end = max(-9999, action.frame_range[1])
    # Export at least 60 frames.
    frame_end = np.max([60, frame_end])

    bpy.ops.export_anim.bvh(filepath=bvh_path,
                            frame_start=frame_start,
                            frame_end=frame_end, root_transform_only=True)
    bpy.data.actions.remove(bpy.data.actions[-1])

    print(data_path + "/" + file + " processed.")
16,087 | import re
import numpy as np
import sys
from Animation import Animation
from Quaternions_old import Quaternions
# BVH channel keyword -> single-letter axis used to build an euler-order string.
channelmap = dict(
    Xrotation='x',
    Yrotation='y',
    Zrotation='z',
)
class Animation:
    """
    Animation is a numpy-like wrapper for animation data

    Animation data consists of several arrays consisting
    of F frames and J joints.

    The animation is specified by

        rotations : (F, J) Quaternions | Joint Rotations
        positions : (F, J, 3) ndarray  | Joint Positions

    The base pose is specified by

        orients : (J) Quaternions | Joint Orientations
        offsets : (J, 3) ndarray  | Joint Offsets

    And the skeletal structure is specified by

        parents : (J) ndarray | Joint Parents
    """

    def __init__(self, rotations, positions, orients, offsets, parents):
        self.rotations = rotations
        self.positions = positions
        self.orients = orients
        self.offsets = offsets
        self.parents = parents

    def __op__(self, op, other):
        """Apply binary *op* componentwise against another Animation."""
        return Animation(
            op(self.rotations, other.rotations),
            op(self.positions, other.positions),
            op(self.orients, other.orients),
            op(self.offsets, other.offsets),
            op(self.parents, other.parents))

    def __iop__(self, op, other):
        """In-place variant of __op__."""
        # BUG FIX: the original read `self.roations` (typo) for both the
        # rotations and positions updates, raising AttributeError and
        # corrupting positions.
        self.rotations = op(self.rotations, other.rotations)
        self.positions = op(self.positions, other.positions)
        self.orients = op(self.orients, other.orients)
        self.offsets = op(self.offsets, other.offsets)
        self.parents = op(self.parents, other.parents)
        return self

    def __sop__(self, op):
        """Apply unary *op* componentwise."""
        return Animation(
            op(self.rotations),
            op(self.positions),
            op(self.orients),
            op(self.offsets),
            op(self.parents))

    def __add__(self, other): return self.__op__(operator.add, other)
    def __sub__(self, other): return self.__op__(operator.sub, other)
    def __mul__(self, other): return self.__op__(operator.mul, other)
    # BUG FIX: `operator.div` no longer exists in Python 3 and `/` dispatches
    # to __truediv__; keep __div__ as a backward-compatible alias.
    def __div__(self, other): return self.__op__(operator.truediv, other)
    __truediv__ = __div__

    def __abs__(self): return self.__sop__(operator.abs)
    def __neg__(self): return self.__sop__(operator.neg)

    def __iadd__(self, other): return self.__iop__(operator.iadd, other)
    def __isub__(self, other): return self.__iop__(operator.isub, other)
    def __imul__(self, other): return self.__iop__(operator.imul, other)
    # BUG FIX: `operator.idiv` is Python 2 only.
    def __idiv__(self, other): return self.__iop__(operator.itruediv, other)
    __itruediv__ = __idiv__

    def __len__(self): return len(self.rotations)

    def __getitem__(self, k):
        # A tuple key indexes (frame, joint): per-joint data drops the frame
        # component of the key; any other key selects frames only.
        if isinstance(k, tuple):
            return Animation(
                self.rotations[k],
                self.positions[k],
                self.orients[k[1:]],
                self.offsets[k[1:]],
                self.parents[k[1:]])
        else:
            return Animation(
                self.rotations[k],
                self.positions[k],
                self.orients,
                self.offsets,
                self.parents)

    def __setitem__(self, k, v):
        if isinstance(k, tuple):
            self.rotations.__setitem__(k, v.rotations)
            self.positions.__setitem__(k, v.positions)
            self.orients.__setitem__(k[1:], v.orients)
            self.offsets.__setitem__(k[1:], v.offsets)
            self.parents.__setitem__(k[1:], v.parents)
        else:
            self.rotations.__setitem__(k, v.rotations)
            self.positions.__setitem__(k, v.positions)
            self.orients.__setitem__(k, v.orients)
            self.offsets.__setitem__(k, v.offsets)
            self.parents.__setitem__(k, v.parents)

    def shape(self):
        """(F, J) frame/joint counts."""
        return (self.rotations.shape[0], self.rotations.shape[1])

    def copy(self):
        """Deep-ish copy: every component array is copied."""
        return Animation(
            self.rotations.copy(), self.positions.copy(),
            self.orients.copy(), self.offsets.copy(),
            self.parents.copy())

    def repeat(self, *args, **kw):
        """Repeat frames; base pose and topology are shared, not copied."""
        return Animation(
            self.rotations.repeat(*args, **kw),
            self.positions.repeat(*args, **kw),
            self.orients, self.offsets, self.parents)

    def ravel(self):
        """Flatten rotations (as log-map), positions, orients, offsets."""
        return np.hstack([
            self.rotations.log().ravel(),
            self.positions.ravel(),
            self.orients.log().ravel(),
            self.offsets.ravel()])

    # BUG FIX: the parameter was misspelled `clas` while the body used `cls`
    # (NameError on every call); restored @classmethod so
    # `Animation.unravel(...)` works as intended.
    @classmethod
    def unravel(cls, anim, shape, parents):
        """Inverse of ravel() given the (F, J) shape and parents array."""
        nf, nj = shape
        rotations = anim[nf*nj*0:nf*nj*3]
        positions = anim[nf*nj*3:nf*nj*6]
        orients = anim[nf*nj*6+nj*0:nf*nj*6+nj*3]
        offsets = anim[nf*nj*6+nj*3:nf*nj*6+nj*6]
        # NOTE(review): the flat slices are not reshaped to (..., 3) before
        # Quaternions.exp — confirm callers pre-shape `anim` as expected.
        return cls(
            Quaternions.exp(rotations), positions,
            Quaternions.exp(orients), offsets,
            parents.copy())
class Quaternions:
    """
    Quaternions is a wrapper around a numpy ndarray
    that allows it to act as if it were an narray of
    a quaternion data type.

    Therefore addition, subtraction, multiplication,
    division, negation, absolute, are all defined
    in terms of quaternion operations such as quaternion
    multiplication.

    This allows for much neater code and many routines
    which conceptually do the same thing to be written
    in the same way for point data and for rotation data.

    The Quaternions class has been designed such that it
    should support broadcasting and slicing in all of the
    usual ways.

    Data layout is (..., 4) with the scalar part first: (w, x, y, z).
    """

    def __init__(self, qs):
        """Wrap *qs*: an ndarray of shape (..., 4) or another Quaternions."""
        if isinstance(qs, np.ndarray):
            # Promote a single quaternion (4,) to a batch of one (1, 4).
            if len(qs.shape) == 1: qs = np.array([qs])
            self.qs = qs
            return
        if isinstance(qs, Quaternions):
            self.qs = qs.qs
            return

        raise TypeError('Quaternions must be constructed from iterable, numpy array, or Quaternions, not %s' % type(qs))

    def __str__(self): return "Quaternions("+ str(self.qs) + ")"
    def __repr__(self): return "Quaternions("+ repr(self.qs) + ")"

    """ Helper Methods for Broadcasting and Data extraction """

    # BUG FIX: restored @classmethod — callers invoke this as
    # Quaternions._broadcast(a, b); without the decorator `a` would bind to
    # the cls parameter.
    @classmethod
    def _broadcast(cls, sqs, oqs, scalar=False):
        if isinstance(oqs, float): return sqs, oqs * np.ones(sqs.shape[:-1])

        ss = np.array(sqs.shape) if not scalar else np.array(sqs.shape[:-1])
        os = np.array(oqs.shape)

        if len(ss) != len(os):
            raise TypeError('Quaternions cannot broadcast together shapes %s and %s' % (sqs.shape, oqs.shape))

        if np.all(ss == os): return sqs, oqs

        if not np.all((ss == os) | (os == np.ones(len(os))) | (ss == np.ones(len(ss)))):
            raise TypeError('Quaternions cannot broadcast together shapes %s and %s' % (sqs.shape, oqs.shape))

        sqsn, oqsn = sqs.copy(), oqs.copy()

        for a in np.where(ss == 1)[0]: sqsn = sqsn.repeat(os[a], axis=a)
        for a in np.where(os == 1)[0]: oqsn = oqsn.repeat(ss[a], axis=a)

        return sqsn, oqsn

    """ Adding Quaternions is just Defined as Multiplication """

    def __add__(self, other): return self * other
    def __sub__(self, other): return self / other

    """ Quaternion Multiplication """

    def __mul__(self, other):
        """
        Quaternion multiplication has three main methods.

        When multiplying a Quaternions array by Quaternions
        normal quaternion multiplication is performed.

        When multiplying a Quaternions array by a vector
        array of the same shape, where the last axis is 3,
        it is assumed to be a Quaternion by 3D-Vector
        multiplication and the 3D-Vectors are rotated
        in space by the Quaternions.

        When multiplying a Quaternions array by a scalar
        or vector of different shape it is assumed to be
        a Quaternions by Scalars multiplication and the
        Quaternions are scaled using Slerp and the identity
        quaternions.
        """

        """ If Quaternions type do Quaternions * Quaternions """
        if isinstance(other, Quaternions):
            sqs, oqs = Quaternions._broadcast(self.qs, other.qs)

            q0 = sqs[...,0]; q1 = sqs[...,1];
            q2 = sqs[...,2]; q3 = sqs[...,3];
            r0 = oqs[...,0]; r1 = oqs[...,1];
            r2 = oqs[...,2]; r3 = oqs[...,3];

            qs = np.empty(sqs.shape)
            qs[...,0] = r0 * q0 - r1 * q1 - r2 * q2 - r3 * q3
            qs[...,1] = r0 * q1 + r1 * q0 - r2 * q3 + r3 * q2
            qs[...,2] = r0 * q2 + r1 * q3 + r2 * q0 - r3 * q1
            qs[...,3] = r0 * q3 - r1 * q2 + r2 * q1 + r3 * q0

            return Quaternions(qs)

        """ If array type do Quaternions * Vectors """
        if isinstance(other, np.ndarray) and other.shape[-1] == 3:
            vs = Quaternions(np.concatenate([np.zeros(other.shape[:-1] + (1,)), other], axis=-1))
            return (self * (vs * -self)).imaginaries

        """ If float do Quaternions * Scalars """
        if isinstance(other, np.ndarray) or isinstance(other, float):
            return Quaternions.slerp(Quaternions.id_like(self), self, other)

        raise TypeError('Cannot multiply/add Quaternions with type %s' % str(type(other)))

    def __div__(self, other):
        """
        When a Quaternion type is supplied, division is defined
        as multiplication by the inverse of that Quaternion.

        When a scalar or vector is supplied it is defined
        as multiplicaion of one over the supplied value.
        Essentially a scaling.
        """
        if isinstance(other, Quaternions): return self * (-other)
        if isinstance(other, np.ndarray): return self * (1.0 / other)
        if isinstance(other, float): return self * (1.0 / other)
        raise TypeError('Cannot divide/subtract Quaternions with type %s' + str(type(other)))

    # BUG FIX: Python 3 dispatches `/` to __truediv__, never to the
    # Python-2-era __div__.
    __truediv__ = __div__

    def __eq__(self, other): return self.qs == other.qs
    def __ne__(self, other): return self.qs != other.qs

    def __neg__(self):
        """ Invert Quaternions """
        return Quaternions(self.qs * np.array([[1, -1, -1, -1]]))

    def __abs__(self):
        """ Unify Quaternions To Single Pole """
        qabs = self.normalized().copy()
        top = np.sum(( qabs.qs) * np.array([1,0,0,0]), axis=-1)
        bot = np.sum((-qabs.qs) * np.array([1,0,0,0]), axis=-1)
        qabs.qs[top < bot] = -qabs.qs[top < bot]
        return qabs

    def __iter__(self): return iter(self.qs)
    def __len__(self): return len(self.qs)

    def __getitem__(self, k):  return Quaternions(self.qs[k])
    def __setitem__(self, k, v): self.qs[k] = v.qs

    # BUG FIX: the four accessors below are consumed as attributes throughout
    # this class (e.g. `self.lengths[..., np.newaxis]` in normalized(),
    # `self.shape + (3,)` in euler()), so they must be properties.
    @property
    def lengths(self):
        return np.sum(self.qs**2.0, axis=-1)**0.5

    @property
    def reals(self):
        return self.qs[...,0]

    @property
    def imaginaries(self):
        return self.qs[...,1:4]

    @property
    def shape(self): return self.qs.shape[:-1]

    def repeat(self, n, **kwargs):
        return Quaternions(self.qs.repeat(n, **kwargs))

    def normalized(self):
        return Quaternions(self.qs / self.lengths[...,np.newaxis])

    def log(self):
        norm = abs(self.normalized())
        imgs = norm.imaginaries
        lens = np.sqrt(np.sum(imgs**2, axis=-1))
        lens = np.arctan2(lens, norm.reals) / (lens + 1e-10)
        return imgs * lens[...,np.newaxis]

    def constrained(self, axis):
        """Closest rotations about *axis* only."""
        rl = self.reals
        im = np.sum(axis * self.imaginaries, axis=-1)

        t1 = -2 * np.arctan2(rl, im) + np.pi
        t2 = -2 * np.arctan2(rl, im) - np.pi

        top = Quaternions.exp(axis[np.newaxis] * (t1[:,np.newaxis] / 2.0))
        bot = Quaternions.exp(axis[np.newaxis] * (t2[:,np.newaxis] / 2.0))
        img = self.dot(top) > self.dot(bot)

        ret = top.copy()
        ret[ img] = top[ img]
        ret[~img] = bot[~img]
        return ret

    def constrained_x(self): return self.constrained(np.array([1,0,0]))
    def constrained_y(self): return self.constrained(np.array([0,1,0]))
    def constrained_z(self): return self.constrained(np.array([0,0,1]))

    def dot(self, q): return np.sum(self.qs * q.qs, axis=-1)

    def copy(self): return Quaternions(np.copy(self.qs))

    def reshape(self, s):
        # BUG FIX: ndarray.reshape returns a new array; the original
        # discarded the result and returned self unchanged.
        self.qs = self.qs.reshape(s)
        return self

    def interpolate(self, ws):
        # BUG FIX: `log` is a method; the original passed the bound method
        # object `abs(self).log` to np.average instead of calling it.
        return Quaternions.exp(np.average(abs(self).log(), axis=0, weights=ws))

    def euler(self, order='xyz'):

        q = self.normalized().qs
        q0 = q[...,0]
        q1 = q[...,1]
        q2 = q[...,2]
        q3 = q[...,3]
        es = np.zeros(self.shape + (3,))

        if order == 'xyz':
            es[...,0] = np.arctan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2))
            es[...,1] = np.arcsin((2 * (q0 * q2 - q3 * q1)).clip(-1,1))
            es[...,2] = np.arctan2(2 * (q0 * q3 + q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3))
        elif order == 'yzx':
            es[...,0] = np.arctan2(2 * (q1 * q0 - q2 * q3), -q1 * q1 + q2 * q2 - q3 * q3 + q0 * q0)
            es[...,1] = np.arctan2(2 * (q2 * q0 - q1 * q3), q1 * q1 - q2 * q2 - q3 * q3 + q0 * q0)
            es[...,2] = np.arcsin((2 * (q1 * q2 + q3 * q0)).clip(-1,1))
        else:
            raise NotImplementedError('Cannot convert from ordering %s' % order)

        # A large block of alternative per-order conversions
        # (http://bediyap.com/programming/convert-quaternion-to-euler-rotations/)
        # was removed here: it was dead code kept as a string literal, and the
        # author noted it did not work correctly for Maya. See also:
        # https://github.com/ehsan/ogre/blob/master/OgreMain/src/OgreMatrix3.cpp

        return es

    def average(self):

        if len(self.shape) == 1:
            # BUG FIX: np.core.umath_tests was removed in NumPy 2.0; the
            # broadcasted outer product below is equivalent to its
            # matrix_multiply of (N,4,1) x (N,1,4).
            system = (self.qs[:,:,np.newaxis] * self.qs[:,np.newaxis,:]).sum(axis=0)
            w, v = np.linalg.eigh(system)
            qiT_dot_qref = (self.qs[:,:,np.newaxis] * v[np.newaxis,:,:]).sum(axis=1)
            return Quaternions(v[:,np.argmin((1.-qiT_dot_qref**2).sum(axis=0))])

        else:
            raise NotImplementedError('Cannot average multi-dimensionsal Quaternions')

    def angle_axis(self):

        norm = self.normalized()
        s = np.sqrt(1 - (norm.reals**2.0))
        s[s == 0] = 0.001

        angles = 2.0 * np.arccos(norm.reals)
        axis = norm.imaginaries / s[...,np.newaxis]

        return angles, axis

    def transforms(self):

        qw = self.qs[...,0]
        qx = self.qs[...,1]
        qy = self.qs[...,2]
        qz = self.qs[...,3]

        x2 = qx + qx; y2 = qy + qy; z2 = qz + qz;
        xx = qx * x2; yy = qy * y2; wx = qw * x2;
        xy = qx * y2; yz = qy * z2; wy = qw * y2;
        xz = qx * z2; zz = qz * z2; wz = qw * z2;

        m = np.empty(self.shape + (3,3))
        m[...,0,0] = 1.0 - (yy + zz)
        m[...,0,1] = xy - wz
        m[...,0,2] = xz + wy
        m[...,1,0] = xy + wz
        m[...,1,1] = 1.0 - (xx + zz)
        m[...,1,2] = yz - wx
        m[...,2,0] = xz - wy
        m[...,2,1] = yz + wx
        m[...,2,2] = 1.0 - (xx + yy)

        return m

    def ravel(self):
        return self.qs.ravel()

    # BUG FIX: the constructors below were missing @classmethod — they are
    # invoked on the class (e.g. Quaternions.id(0), Quaternions.exp(ws)).
    @classmethod
    def id(cls, n):

        if isinstance(n, tuple):
            qs = np.zeros(n + (4,))
            qs[...,0] = 1.0
            return Quaternions(qs)

        # BUG FIX: dropped the Python-2 `long` check (NameError on Python 3);
        # `int` covers it.
        if isinstance(n, int):
            qs = np.zeros((n,4))
            qs[:,0] = 1.0
            return Quaternions(qs)

        raise TypeError('Cannot Construct Quaternion from %s type' % str(type(n)))

    @classmethod
    def id_like(cls, a):
        qs = np.zeros(a.shape + (4,))
        qs[...,0] = 1.0
        return Quaternions(qs)

    @classmethod
    def exp(cls, ws):

        ts = np.sum(ws**2.0, axis=-1)**0.5
        ts[ts == 0] = 0.001
        ls = np.sin(ts) / ts

        qs = np.empty(ws.shape[:-1] + (4,))
        qs[...,0] = np.cos(ts)
        qs[...,1] = ws[...,0] * ls
        qs[...,2] = ws[...,1] * ls
        qs[...,3] = ws[...,2] * ls

        return Quaternions(qs).normalized()

    @classmethod
    def slerp(cls, q0s, q1s, a):

        fst, snd = cls._broadcast(q0s.qs, q1s.qs)
        fst, a = cls._broadcast(fst, a, scalar=True)
        snd, a = cls._broadcast(snd, a, scalar=True)

        # Renamed from `len` (shadowed the builtin).
        cosines = np.sum(fst * snd, axis=-1)
        neg = cosines < 0.0

        cosines[neg] = -cosines[neg]
        snd[neg] = -snd[neg]

        amount0 = np.zeros(a.shape)
        amount1 = np.zeros(a.shape)

        # Fall back to linear interpolation for nearly-parallel quaternions.
        linear = (1.0 - cosines) < 0.01
        omegas = np.arccos(cosines[~linear])
        sinoms = np.sin(omegas)

        amount0[ linear] = 1.0 - a[linear]
        amount1[ linear] = a[linear]
        amount0[~linear] = np.sin((1.0 - a[~linear]) * omegas) / sinoms
        amount1[~linear] = np.sin( a[~linear] * omegas) / sinoms

        return Quaternions(
            amount0[...,np.newaxis] * fst +
            amount1[...,np.newaxis] * snd)

    @classmethod
    def between(cls, v0s, v1s):
        a = np.cross(v0s, v1s)
        w = np.sqrt((v0s**2).sum(axis=-1) * (v1s**2).sum(axis=-1)) + (v0s * v1s).sum(axis=-1)
        return Quaternions(np.concatenate([w[...,np.newaxis], a], axis=-1)).normalized()

    @classmethod
    def from_angle_axis(cls, angles, axis):
        axis = axis / (np.sqrt(np.sum(axis**2, axis=-1)) + 1e-10)[...,np.newaxis]
        sines = np.sin(angles / 2.0)[...,np.newaxis]
        cosines = np.cos(angles / 2.0)[...,np.newaxis]
        return Quaternions(np.concatenate([cosines, axis * sines], axis=-1))

    @classmethod
    def from_euler(cls, es, order='xyz', world=False):

        axis = {
            'x' : np.array([1,0,0]),
            'y' : np.array([0,1,0]),
            'z' : np.array([0,0,1]),
        }

        q0s = Quaternions.from_angle_axis(es[...,0], axis[order[0]])
        q1s = Quaternions.from_angle_axis(es[...,1], axis[order[1]])
        q2s = Quaternions.from_angle_axis(es[...,2], axis[order[2]])

        return (q2s * (q1s * q0s)) if world else (q0s * (q1s * q2s))

    @classmethod
    def from_transforms(cls, ts):

        d0, d1, d2 = ts[...,0,0], ts[...,1,1], ts[...,2,2]

        q0 = ( d0 + d1 + d2 + 1.0) / 4.0
        q1 = ( d0 - d1 - d2 + 1.0) / 4.0
        q2 = (-d0 + d1 - d2 + 1.0) / 4.0
        q3 = (-d0 - d1 + d2 + 1.0) / 4.0

        q0 = np.sqrt(q0.clip(0,None))
        q1 = np.sqrt(q1.clip(0,None))
        q2 = np.sqrt(q2.clip(0,None))
        q3 = np.sqrt(q3.clip(0,None))

        c0 = (q0 >= q1) & (q0 >= q2) & (q0 >= q3)
        c1 = (q1 >= q0) & (q1 >= q2) & (q1 >= q3)
        c2 = (q2 >= q0) & (q2 >= q1) & (q2 >= q3)
        c3 = (q3 >= q0) & (q3 >= q1) & (q3 >= q2)

        q1[c0] *= np.sign(ts[c0,2,1] - ts[c0,1,2])
        q2[c0] *= np.sign(ts[c0,0,2] - ts[c0,2,0])
        q3[c0] *= np.sign(ts[c0,1,0] - ts[c0,0,1])

        q0[c1] *= np.sign(ts[c1,2,1] - ts[c1,1,2])
        q2[c1] *= np.sign(ts[c1,1,0] + ts[c1,0,1])
        q3[c1] *= np.sign(ts[c1,0,2] + ts[c1,2,0])

        q0[c2] *= np.sign(ts[c2,0,2] - ts[c2,2,0])
        q1[c2] *= np.sign(ts[c2,1,0] + ts[c2,0,1])
        q3[c2] *= np.sign(ts[c2,2,1] + ts[c2,1,2])

        q0[c3] *= np.sign(ts[c3,1,0] - ts[c3,0,1])
        q1[c3] *= np.sign(ts[c3,2,0] + ts[c3,0,2])
        q2[c3] *= np.sign(ts[c3,2,1] + ts[c3,1,2])

        qs = np.empty(ts.shape[:-2] + (4,))
        qs[...,0] = q0
        qs[...,1] = q1
        qs[...,2] = q2
        qs[...,3] = q3

        return cls(qs)
The provided code snippet includes necessary dependencies for implementing the `load_bfa` function. Write a Python function `def load_bfa(filename, start=None, end=None, order=None, world=False)` to solve the following problem:
Reads a BVH file and constructs an animation !!! Read from bfa, will replace the end sites of arms by two joints (w/ unit rotation) Parameters ---------- filename: str File to be opened start : int Optional Starting Frame end : int Optional Ending Frame order : str Optional Specifier for joint order. Given as string E.G 'xyz', 'zxy' world : bool If set to true euler angles are applied together in world space rather than local space Returns ------- (animation, joint_names, frametime) Tuple of loaded animation and joint names
Here is the function:
def load_bfa(filename, start=None, end=None, order=None, world=False):
    """
    Reads a BVH file and constructs an animation
    !!! Read from bfa, will replace the end sites of arms by two joints (w/ unit rotation)
    Parameters
    ----------
    filename: str
        File to be opened
    start : int
        Optional Starting Frame
    end : int
        Optional Ending Frame
    order : str
        Optional Specifier for joint order.
        Given as string E.G 'xyz', 'zxy'
    world : bool
        If set to true euler angles are applied
        together in world space rather than local
        space
    Returns
    -------
    (animation, joint_names, frametime)
        Tuple of loaded animation and joint names
    """
    i = 0
    active = -1
    end_site = False
    # File-order indices where hand joints are inserted in place of the arm
    # End Sites. NOTE(review): hard-coded for the BFA skeleton layout —
    # confirm against the dataset before reuse.
    hand_idx = [9, 14]

    names = []
    orients = Quaternions.id(0)
    offsets = np.array([]).reshape((0,3))
    parents = np.array([], dtype=int)

    # FIX: `with` guarantees the handle is closed even if parsing raises, and
    # all regex literals are raw strings (non-raw "\s" escapes are a
    # SyntaxWarning on Python 3.12+, a future error).
    with open(filename, "r") as f:
        for line in f:
            if "HIERARCHY" in line: continue
            if "MOTION" in line: continue

            rmatch = re.match(r"ROOT (\w+)", line)
            if rmatch:
                names.append(rmatch.group(1))
                offsets = np.append(offsets, np.array([[0,0,0]]), axis=0)
                orients.qs = np.append(orients.qs, np.array([[1,0,0,0]]), axis=0)
                parents = np.append(parents, active)
                active = (len(parents)-1)
                continue

            if "{" in line: continue

            if "}" in line:
                if end_site: end_site = False
                else: active = parents[active]
                continue

            offmatch = re.match(r"\s*OFFSET\s+([\-\d\.e]+)\s+([\-\d\.e]+)\s+([\-\d\.e]+)", line)
            if offmatch:
                if not end_site:
                    offsets[active] = np.array([list(map(float, offmatch.groups()))])
                # An earlier variant that kept End Site offsets for the hand
                # joints is preserved below, disabled.
                """
                else:
                    print("active = ", active)
                    if active in hand_idx:
                        offsets[active] = np.array([list(map(float, offmatch.groups()))])
                """
                continue

            chanmatch = re.match(r"\s*CHANNELS\s+(\d+)", line)
            if chanmatch:
                channels = int(chanmatch.group(1))
                if order is None:
                    # Derive the euler order from the rotation channel names.
                    channelis = 0 if channels == 3 else 3
                    channelie = 3 if channels == 3 else 6
                    parts = line.split()[2+channelis:2+channelie]
                    if any([p not in channelmap for p in parts]):
                        continue
                    order = "".join([channelmap[p] for p in parts])
                continue

            jmatch = re.match(r"\s*JOINT\s+(\w+)", line)
            if jmatch:
                names.append(jmatch.group(1))
                offsets = np.append(offsets, np.array([[0,0,0]]), axis=0)
                orients.qs = np.append(orients.qs, np.array([[1,0,0,0]]), axis=0)
                parents = np.append(parents, active)
                active = (len(parents)-1)
                continue

            if "End Site" in line:
                # Promote the arm End Sites to real (zero-rotation) joints.
                if active + 1 in hand_idx:
                    print("parent:", names[-1])
                    name = "LeftHandIndex" if active + 1 == hand_idx[0] else "RightHandIndex"
                    names.append(name)
                    offsets = np.append(offsets, np.array([[0,0,0]]), axis=0)
                    orients.qs = np.append(orients.qs, np.array([[1,0,0,0]]), axis=0)
                    parents = np.append(parents, active)
                    active = (len(parents)-1)
                else:
                    end_site = True
                continue

            fmatch = re.match(r"\s*Frames:\s+(\d+)", line)
            if fmatch:
                if start and end:
                    # NOTE(review): truthiness check — start=0 is treated like
                    # None here and below; confirm intended.
                    fnum = (end - start)-1
                else:
                    fnum = int(fmatch.group(1))
                # result: [fnum, J, 3]
                positions = offsets[np.newaxis].repeat(fnum, axis=0)
                # result: [fnum, len(orients), 3]
                rotations = np.zeros((fnum, len(orients), 3))
                continue

            fmatch = re.match(r"\s*Frame Time:\s+([\d\.]+)", line)
            if fmatch:
                frametime = float(fmatch.group(1))
                continue

            if (start and end) and (i < start or i >= end-1):
                i += 1
                continue

            dmatch = line.strip().split()
            if dmatch:
                data_block = np.array(list(map(float, dmatch)))
                N = len(parents)
                fi = i - start if start else i
                if channels == 3:
                    # Root position plus per-joint rotations; zero rotations
                    # are spliced in for the two inserted hand joints.
                    positions[fi,0:1] = data_block[0:3]
                    tmp = data_block[3: ].reshape(N - 2, 3)
                    tmp = np.concatenate([tmp[:hand_idx[0]],
                                          np.array([[0, 0, 0]]),
                                          tmp[hand_idx[0]: hand_idx[1] - 1],
                                          np.array([[0, 0, 0]]),
                                          tmp[hand_idx[1] - 1:]], axis=0)
                    rotations[fi, : ] = tmp.reshape(N,3)
                elif channels == 6:
                    data_block = data_block.reshape(N,6)
                    # fill in all positions
                    positions[fi,:] = data_block[:,0:3]
                    rotations[fi,:] = data_block[:,3:6]
                elif channels == 9:
                    positions[fi,0] = data_block[0:3]
                    data_block = data_block[3:].reshape(N-1,9)
                    rotations[fi,1:] = data_block[:,3:6]
                    positions[fi,1:] += data_block[:,0:3] * data_block[:,6:9]
                else:
                    raise Exception("Too many channels! %i" % channels)

                i += 1

    rotations = Quaternions.from_euler(np.radians(rotations), order=order, world=world)

    return (Animation(rotations, positions, orients, offsets, parents), names, frametime)
16,088 | import numpy as np
import scipy.sparse as sparse
import Animation as Animation
The provided code snippet includes necessary dependencies for implementing the `load_from_maya` function. Write a Python function `def load_from_maya(root)` to solve the following problem:
Load joint parents and names from Maya. Parameters: `root` (PyNode) — the root Maya node. Returns: `(names, parents)` — a list of joint names and a (J,) array of indices giving the parent joint of each joint J, where index -1 means the joint has no parent.
Here is the function:
def load_from_maya(root):
    """
    Load joint parents and names from maya
    Parameters
    ----------
    root : PyNode
        Root Maya Node
    Returns
    -------
    (names, parents) : ([str], (J) ndarray)
        List of joint names and array
        of indices representing the parent
        joint for each joint J.
        Joint index -1 is used to represent
        that there is no parent joint
    """
    import pymel.core as pm

    names = []
    parents = []

    def unload_joint(j, parents, par):

        idx = len(names)  # renamed from `id` (shadowed the builtin)
        names.append(j)
        parents.append(par)

        children = [c for c in j.getChildren() if
                    isinstance(c, pm.nt.Transform) and
                    not isinstance(c, pm.nt.Constraint) and
                    not any(pm.listRelatives(c, s=True)) and
                    (any(pm.listRelatives(c, ad=True, ap=False, type='joint')) or isinstance(c, pm.nt.Joint))]

        # BUG FIX: the original used `map(lambda c: ..., children)`, but map
        # is lazy in Python 3 and the iterator was never consumed, so the
        # recursion never ran and only the root joint was recorded.
        for c in children:
            unload_joint(c, parents, idx)

    unload_joint(root, parents, -1)

    return (names, parents)
16,089 | import numpy as np
import scipy.sparse as sparse
import Animation as Animation
def joints(parents):
    """
    Array of joint indices 0..J-1.

    Parameters
    ----------
    parents : (J) ndarray
        parents array

    Returns
    -------
    joints : (J) ndarray
        Array of joint indices
    """
    count = len(parents)
    return np.arange(count, dtype=int)
The provided code snippet includes necessary dependencies for implementing the `joints_list` function. Write a Python function `def joints_list(parents)` to solve the following problem:
Parameters: `parents` — a (J,) ndarray of parent joint indices. Returns: `joints` — a list of ndarrays, one per joint, each holding that joint's index.
Here is the function:
def joints_list(parents):
    """
    Wrap each joint index in its own length-1 array.

    Parameters
    ----------
    parents : (J) ndarray
        parents array

    Returns
    -------
    joints : [ndarray]
        List of arrays of joint indices for each joint
    """
    column = joints(parents)[:, np.newaxis]
    return [row for row in column]
16,090 | import numpy as np
import scipy.sparse as sparse
import Animation as Animation
def joints_mask(parents):
    """Identity boolean mask: each joint relates only to itself."""
    size = len(parents)
    return np.eye(size).astype(bool)
16,091 | import numpy as np
import scipy.sparse as sparse
import Animation as Animation
def children_list(parents):
    """
    Per-joint arrays of direct child indices.

    Parameters
    ----------
    parents : (J) ndarray
        parents array

    Returns
    -------
    children : [ndarray]
        List of arrays of joint indices for the children of each joint
    """
    return [
        np.array([child for child, parent in enumerate(parents) if parent == i])
        for i in joints(parents)
    ]
def mask(parents, filter):
    """
    Build the (J, J) boolean truth table for *filter*.

    A mask is a truth table for a condition over J joints: with
    `m = mask(parents, children_list)`, `m[i, j]` is True exactly when
    joint j appears in the filter's output array for joint i.

    Parameters
    ----------
    parents : (J) ndarray
        parents array
    filter : (J) ndarray -> [ndarray]
        function that outputs a list of arrays
        of joint indices for some condition

    Returns
    -------
    mask : (N, N) ndarray
        boolean truth table of given condition
    """
    jnts = joints(parents)
    table = np.zeros((len(parents), len(parents)), dtype=bool)
    for i, idxs in enumerate(filter(parents)):
        table[i, :] = np.any(jnts[:, np.newaxis] == idxs[np.newaxis, :], axis=1)
    return table
def children_mask(parents):
    """(J, J) boolean table: entry [i, j] is True when joint j is a direct child of joint i."""
    return mask(parents, children_list)
16,092 | import numpy as np
import scipy.sparse as sparse
import Animation as Animation
def parents_list(parents):
    """
    Wrap each joint's parent index in its own length-1 array.

    Parameters
    ----------
    parents : (J) ndarray
        parents array

    Returns
    -------
    parents : [ndarray]
        List of arrays of joint indices for the parents of each joint
    """
    column = parents[:, np.newaxis]
    return [row for row in column]
def mask(parents, filter):
    """
    Construct a (J, J) boolean mask for a given filter.

    A mask is a truth table for a condition over J joints: with
    `m = mask(parents, children_list)`, `m[i, j]` is True exactly when
    joint j appears in the filter's output array for joint i.

    Parameters
    ----------
    parents : (J) ndarray
        parents array
    filter : (J) ndarray -> [ndarray]
        function that outputs a list of arrays
        of joint indices for some condition

    Returns
    -------
    mask : (N, N) ndarray
        boolean truth table of given condition
    """
    count = len(parents)
    jnts = joints(parents)
    table = np.zeros((count, count)).astype(bool)
    # Fill each row in place by broadcasting joint indices against the
    # filter's selection for that row.
    for row, selected in zip(table, filter(parents)):
        row[:] = np.any(jnts[:, np.newaxis] == selected[np.newaxis, :], axis=1)
    return table
def parents_mask(parents): return mask(parents, parents_list) | null |
16,093 | import numpy as np
import scipy.sparse as sparse
import Animation as Animation
def descendants_list(parents):
    """
    List, for every joint, the indices of all of its descendants.

    Parameters
    ----------
    parents : (J) ndarray
        parents array

    Returns
    -------
    descendants : [ndarray]
        List of arrays of joint indices for
        the descendants of each joint
    """
    children = children_list(parents)

    def collect(i):
        # direct children first, then each child's own descendants,
        # matching the original accumulation order
        found = list(children[i])
        for c in children[i]:
            found.extend(collect(c))
        return found

    return [np.array(collect(j)) for j in joints(parents)]
def mask(parents, filter):
    """
    Constructs a mask for a given filter.

    A mask is a (J, J) ndarray truth table for a given
    condition over J joints. For example there
    may be a mask specifying if a joint N is a
    child of another joint M.

    This could be constructed into a mask using
    `m = mask(parents, children_list)` and the condition
    of childhood tested using `m[N, M]`.

    Parameters
    ----------
    parents : (J) ndarray
        parents array

    filter : (J) ndarray -> [ndarray]
        function that outputs a list of arrays
        of joint indices for some condition

    Returns
    -------
    mask : (J, J) ndarray
        boolean truth table of given condition
    """
    # all-False (J, J) table, filled one row per joint below
    m = np.zeros((len(parents), len(parents))).astype(bool)
    jnts = joints(parents)   # indices of every joint
    fltr = filter(parents)   # per-joint arrays of related joint indices
    # row i becomes True at every column whose joint index appears in fltr[i]
    for i,f in enumerate(fltr): m[i,:] = np.any(jnts[:,np.newaxis] == f[np.newaxis,:], axis=1)
    return m
def descendants_mask(parents): return mask(parents, descendants_list) | null |
16,094 | import numpy as np
import scipy.sparse as sparse
import Animation as Animation
def ancestors_list(parents):
    """
    List, for every joint, the indices of all of its ancestors.

    Parameters
    ----------
    parents : (J) ndarray
        parents array

    Returns
    -------
    ancestors : [ndarray]
        List of arrays of joint indices for
        the ancestors of each joint
    """
    below = descendants_list(parents)
    all_joints = joints(parents)
    # j is an ancestor of i exactly when i appears among j's descendants
    return [np.array([j for j in all_joints if i in below[j]])
            for i in all_joints]
def mask(parents, filter):
    """
    Constructs a mask for a given filter.

    A mask is a (J, J) ndarray truth table for a given
    condition over J joints. For example there
    may be a mask specifying if a joint N is a
    child of another joint M.

    This could be constructed into a mask using
    `m = mask(parents, children_list)` and the condition
    of childhood tested using `m[N, M]`.

    Parameters
    ----------
    parents : (J) ndarray
        parents array

    filter : (J) ndarray -> [ndarray]
        function that outputs a list of arrays
        of joint indices for some condition

    Returns
    -------
    mask : (J, J) ndarray
        boolean truth table of given condition
    """
    # start from an all-False (J, J) table
    m = np.zeros((len(parents), len(parents))).astype(bool)
    jnts = joints(parents)   # array of all joint indices
    fltr = filter(parents)   # one index array per joint, from the filter
    # broadcast comparison marks row i True wherever fltr[i] contains the column's joint
    for i,f in enumerate(fltr): m[i,:] = np.any(jnts[:,np.newaxis] == f[np.newaxis,:], axis=1)
    return m
def ancestors_mask(parents): return mask(parents, ancestors_list) | null |
16,095 | import numpy as np
import scipy.sparse as sparse
import Animation as Animation
def joint_chain_ascend(parents, start, end):
    """
    Walk up the joint hierarchy from `start` to `end`.

    Parameters
    ----------
    parents : (J) ndarray
        parents array
    start : int
        joint index to start from
    end : int
        ancestor joint index to stop at (inclusive)

    Returns
    -------
    chain : (N) ndarray
        joint indices from `start` up to and including `end`

    NOTE(review): assumes `end` is reachable by following
    parents from `start`; otherwise this never terminates.
    """
    links = []
    current = start
    while current != end:
        links.append(current)
        current = parents[current]
    links.append(end)
    return np.array(links, dtype=int)
16,096 | import numpy as np
import scipy.sparse as sparse
import Animation as Animation
def children_list(parents):
    """
    List, for every joint, the indices of its direct children.

    Parameters
    ----------
    parents : (J) ndarray
        parents array

    Returns
    -------
    children : [ndarray]
        List of arrays of joint indices for
        the children of each joint
    """
    result = []
    for j in joints(parents):
        kids = [c for c, p in enumerate(parents) if p == j]
        result.append(np.array(kids))
    return result
class Animation:
    """
    Animation is a numpy-like wrapper for animation data.

    Animation data consists of several arrays consisting
    of F frames and J joints.

    The animation is specified by

        rotations : (F, J) Quaternions | Joint Rotations
        positions : (F, J, 3) ndarray  | Joint Positions

    The base pose is specified by

        orients   : (J) Quaternions    | Joint Orientations
        offsets   : (J, 3) ndarray     | Joint Offsets

    And the skeletal structure is specified by

        parents   : (J) ndarray        | Joint Parents
    """

    def __init__(self, rotations, positions, orients, offsets, parents):
        self.rotations = rotations
        self.positions = positions
        self.orients = orients
        self.offsets = offsets
        self.parents = parents

    def __op__(self, op, other):
        # component-wise binary operation producing a new Animation
        return Animation(
            op(self.rotations, other.rotations),
            op(self.positions, other.positions),
            op(self.orients, other.orients),
            op(self.offsets, other.offsets),
            op(self.parents, other.parents))

    def __iop__(self, op, other):
        # component-wise in-place binary operation.
        # Fixes two typos in the original: 'self.roations' (AttributeError
        # at runtime) and the second line operating on rotations where it
        # should operate on positions.
        self.rotations = op(self.rotations, other.rotations)
        self.positions = op(self.positions, other.positions)
        self.orients = op(self.orients, other.orients)
        self.offsets = op(self.offsets, other.offsets)
        self.parents = op(self.parents, other.parents)
        return self

    def __sop__(self, op):
        # component-wise unary operation producing a new Animation
        return Animation(
            op(self.rotations),
            op(self.positions),
            op(self.orients),
            op(self.offsets),
            op(self.parents))

    def __add__(self, other): return self.__op__(operator.add, other)
    def __sub__(self, other): return self.__op__(operator.sub, other)
    def __mul__(self, other): return self.__op__(operator.mul, other)
    # operator.div was removed in Python 3; truediv is the equivalent.
    # __truediv__ is aliased so Python 3's `/` operator also works.
    def __div__(self, other): return self.__op__(operator.truediv, other)
    __truediv__ = __div__
    def __abs__(self): return self.__sop__(operator.abs)
    def __neg__(self): return self.__sop__(operator.neg)

    def __iadd__(self, other): return self.__iop__(operator.iadd, other)
    def __isub__(self, other): return self.__iop__(operator.isub, other)
    def __imul__(self, other): return self.__iop__(operator.imul, other)
    # operator.idiv was removed in Python 3; itruediv is the equivalent.
    def __idiv__(self, other): return self.__iop__(operator.itruediv, other)
    __itruediv__ = __idiv__

    def __len__(self): return len(self.rotations)

    def __getitem__(self, k):
        # a tuple index selects (frames, joints); the per-joint arrays
        # (orients/offsets/parents) only see the joint part of the index
        if isinstance(k, tuple):
            return Animation(
                self.rotations[k],
                self.positions[k],
                self.orients[k[1:]],
                self.offsets[k[1:]],
                self.parents[k[1:]])
        else:
            return Animation(
                self.rotations[k],
                self.positions[k],
                self.orients,
                self.offsets,
                self.parents)

    def __setitem__(self, k, v):
        if isinstance(k, tuple):
            self.rotations.__setitem__(k, v.rotations)
            self.positions.__setitem__(k, v.positions)
            self.orients.__setitem__(k[1:], v.orients)
            self.offsets.__setitem__(k[1:], v.offsets)
            self.parents.__setitem__(k[1:], v.parents)
        else:
            self.rotations.__setitem__(k, v.rotations)
            self.positions.__setitem__(k, v.positions)
            self.orients.__setitem__(k, v.orients)
            self.offsets.__setitem__(k, v.offsets)
            self.parents.__setitem__(k, v.parents)

    @property
    def shape(self):
        # (frames, joints). Restored as a property: `constraints`
        # below indexes it as `anim.shape[1]`, which requires
        # attribute access, not a method call.
        return (self.rotations.shape[0], self.rotations.shape[1])

    def copy(self):
        """Deep-ish copy: every component array is copied."""
        return Animation(
            self.rotations.copy(), self.positions.copy(),
            self.orients.copy(), self.offsets.copy(),
            self.parents.copy())

    def repeat(self, *args, **kw):
        """Repeat the per-frame data; base pose and skeleton are shared."""
        return Animation(
            self.rotations.repeat(*args, **kw),
            self.positions.repeat(*args, **kw),
            self.orients, self.offsets, self.parents)

    def ravel(self):
        """Flatten rotations (as log-quaternions), positions, orients
        and offsets into a single 1D array."""
        return np.hstack([
            self.rotations.log().ravel(),
            self.positions.ravel(),
            self.orients.log().ravel(),
            self.offsets.ravel()])

    @classmethod
    def unravel(cls, anim, shape, parents):
        """
        Inverse of `ravel`: rebuild an Animation from a flat array.

        Fixes the original's `clas`/`cls` parameter mismatch, which
        raised NameError, and marks the method as a classmethod.
        """
        nf, nj = shape
        rotations = anim[nf*nj*0:nf*nj*3]
        positions = anim[nf*nj*3:nf*nj*6]
        orients = anim[nf*nj*6+nj*0:nf*nj*6+nj*3]
        offsets = anim[nf*nj*6+nj*3:nf*nj*6+nj*6]
        return cls(
            Quaternions.exp(rotations), positions,
            Quaternions.exp(orients), offsets,
            parents.copy())
The provided code snippet includes necessary dependencies for implementing the `constraints` function. Write a Python function `def constraints(anim, **kwargs)` to solve the following problem:
Constraint list for Animation This constraint list can be used in the VerletParticle solver to constrain an animation's global joint positions. Parameters ---------- anim : Animation Input animation masses : (F, J) ndarray Optional list of masses for joints J across frames F defaults to weighting by vertical height Returns ------- constraints : [(int, int, (F, J) ndarray, (F, J) ndarray, (F, J) ndarray)] A list of constraints in the format: (Joint1, Joint2, Masses1, Masses2, Lengths)
Here is the function:
def constraints(anim, **kwargs):
    """
    Constraint list for Animation.

    This constraint list can be used in the
    VerletParticle solver to constrain
    an animation's global joint positions.

    Parameters
    ----------
    anim : Animation
        Input animation

    masses : (F, J) ndarray
        Optional list of masses
        for joints J across frames F;
        defaults to weighting by
        vertical height

    Returns
    -------
    constraints : [(int, int, (F, J) ndarray, (F, J) ndarray, (F, J) ndarray)]
        A list of constraints in the format:
        (Joint1, Joint2, Masses1, Masses2, Lengths)
    """
    masses = kwargs.pop('masses', None)

    children = children_list(anim.parents)
    constraints = []

    points_offsets = Animation.offsets_global(anim)
    points = Animation.positions_global(anim)

    if masses is None:
        # default mass is inversely proportional to rest height
        # (assumes offsets_global returns (J, 3) with Y up — TODO confirm)
        masses = 1.0 / (0.1 + np.absolute(points_offsets[:, 1]))
        masses = masses[np.newaxis].repeat(len(anim), axis=0)

    # range() instead of Python-2-only xrange()
    for j in range(anim.shape[1]):

        # constraints between the joint and each of its children
        for c0 in children[j]:

            dists = np.sum((points[:, c0] - points[:, j])**2.0, axis=1)**0.5
            constraints.append((c0, j, masses[:, c0], masses[:, j], dists))

            # constraints between every ordered pair of children of the
            # joint. This loop must be nested inside the c0 loop — at the
            # top level c0 would be the leftover value from the loop above.
            for c1 in children[j]:

                if c0 == c1: continue

                dists = np.sum((points[:, c0] - points[:, c1])**2.0, axis=1)**0.5
                constraints.append((c0, c1, masses[:, c0], masses[:, c1], dists))

    return constraints
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.