diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..d33f14d1df73832ea9a8b1efcf744b3a318c18a9 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +272-dim-Motion-Representation/gif/recover_position.gif filter=lfs diff=lfs merge=lfs -text +272-dim-Motion-Representation/gif/recover_rotation.gif filter=lfs diff=lfs merge=lfs -text +272-dim-Motion-Representation/gif/spin_ik.gif filter=lfs diff=lfs merge=lfs -text +assets/teaser.jpg filter=lfs diff=lfs merge=lfs -text diff --git a/Causal_TAE/net_last.pth b/Causal_TAE/net_last.pth new file mode 100644 index 0000000000000000000000000000000000000000..2a6f7fdfb4df76b1c702fb6beafe95988e0c1edd --- /dev/null +++ b/Causal_TAE/net_last.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8becaeebbd0588d7080ea3baf19ca036fe06851035c8b5f214dac1a5cf23949c +size 304843534 diff --git a/EVAL_GT.sh b/EVAL_GT.sh new file mode 100644 index 0000000000000000000000000000000000000000..3faadc5539ff813ae667d0e2c0d3de65351e079f --- /dev/null +++ b/EVAL_GT.sh @@ -0,0 +1,2 @@ +ln -s ../humanml3d_272 ./Evaluator_272/ +python eval_gt.py \ No newline at end of file diff --git a/EVAL_causal_TAE.sh b/EVAL_causal_TAE.sh new file mode 100644 index 0000000000000000000000000000000000000000..c8d49a644b5a8186a0925350a324a4a6dab62848 --- /dev/null +++ b/EVAL_causal_TAE.sh @@ -0,0 +1,6 @@ +ln -s ../utils ./Evaluator_272/ +ln -s ../humanml3d_272 ./Evaluator_272/ +ln -s ../options ./Evaluator_272/ +ln -s ../models ./Evaluator_272/ +ln -s ../visualization ./Evaluator_272/ +python eval_causal_TAE.py --resume-pth output/causal_TAE/net_last.pth \ No newline at end of file diff --git a/EVAL_t2m.sh b/EVAL_t2m.sh new file mode 100644 index 0000000000000000000000000000000000000000..869c623a043c34d60cdd08edf16d72c2aa3f5d5e --- /dev/null +++ b/EVAL_t2m.sh @@ -0,0 +1,7 @@ +ln -s ../utils ./Evaluator_272/ +ln -s ../humanml3d_272 ./Evaluator_272/ +ln -s ../options ./Evaluator_272/ +ln -s ../models ./Evaluator_272/ +ln -s ../visualization ./Evaluator_272/ +ln -s ../Causal_TAE ./Evaluator_272/ +python eval_t2m.py --resume-pth Causal_TAE/net_last.pth --resume-trans /cpfs03/shared/IDC/wangjingbo_group/motionstreamer/Open_source_Train_AR_16_1024_fps_30_111M_9/latest.pth \ No newline at end of file diff --git a/Evaluator_272/.DS_Store b/Evaluator_272/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..7f24622f6c835010627fe92b2c8f432625b4dd6d Binary files /dev/null and b/Evaluator_272/.DS_Store differ diff --git a/Evaluator_272/configs/assets.yaml b/Evaluator_272/configs/assets.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a4f8e7bb4971c64c4268845fd35670aedddcc6e6 --- /dev/null +++ b/Evaluator_272/configs/assets.yaml @@ -0,0 +1,13 @@ +FOLDER: './experiments' # Path for saving experiment files + +TEST: + FOLDER: './results' # Path for saving test results + +DATASET: + HUMANML3D_272: + ROOT: './datasets/humanml3d_272' # HumanML3D_272 directory + SPLIT_ROOT: './datasets/humanml3d_272/split' # HumanML3D_272 splits directory + +model: + bert_path: './deps/distilbert-base-uncased' + diff --git a/Evaluator_272/configs/base.yaml b/Evaluator_272/configs/base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c79e464bb6b248e584b6d36f287a6a35af890adb --- /dev/null +++ 
b/Evaluator_272/configs/base.yaml @@ -0,0 +1,92 @@ +SEED_VALUE: 1234 +DEBUG: True +TRAIN: + SPLIT: 'train' + NUM_WORKERS: 2 # Number of workers + BATCH_SIZE: 4 # Size of batches + START_EPOCH: 0 # Start epoch + END_EPOCH: 400 # End epoch + RESUME: '' # Experiment path to resume training from + PRETRAINED_VAE: '' + PRETRAINED: '' # Pretrained model path + + OPTIM: + TYPE: 'AdamW' # Optimizer type + LR: 1e-4 # Learning rate + + ABLATION: + VAE_TYPE: 'actor' # vae ablation: actor or mcross + VAE_ARCH: 'encoder_decoder' # mdiffusion vae architecture + PE_TYPE: 'actor' # mdiffusion mld or actor + DIFF_PE_TYPE: 'actor' # mdiffusion mld or actor + SKIP_CONNECT: False # skip connection for denoiser vae + # use a linear layer to expand mean and std rather than expanding the token count + MLP_DIST: False + IS_DIST: False # Mcross distribution kl + PREDICT_EPSILON: True # noise or motion + +EVAL: + SPLIT: 'gtest' + BATCH_SIZE: 1 # Evaluating Batch size + NUM_WORKERS: 12 # Number of evaluation workers + +TEST: + TEST_DIR: '' + CHECKPOINTS: '' # Pretrained model path + SPLIT: 'gtest' + BATCH_SIZE: 1 # Testing Batch size + NUM_WORKERS: 12 # Number of test workers + SAVE_PREDICTIONS: False # Whether to save predictions + COUNT_TIME: False # Whether to count time during test + REPLICATION_TIMES: 20 # Number of times to replicate the test + MM_NUM_SAMPLES: 100 # Number of samples for multimodal test + MM_NUM_REPEATS: 30 # Number of repeats for multimodal test + MM_NUM_TIMES: 10 # Number of times to repeat the multimodal test + DIVERSITY_TIMES: 300 # Number of times to repeat the diversity test + REP_I: 0 +model: + target: 'modules' + t2m_textencoder: + dim_word: 300 + dim_pos_ohot: 15 + dim_text_hidden: 512 + dim_coemb_hidden: 512 + + t2m_motionencoder: + dim_move_hidden: 512 + dim_move_latent: 512 + dim_motion_hidden: 1024 + dim_motion_latent: 512 +LOSS: + LAMBDA_LATENT: 1e-5 # Lambda for latent losses + LAMBDA_KL: 1e-5 # Lambda for kl losses + LAMBDA_REC: 1.0 # Lambda for reconstruction losses + LAMBDA_JOINT: 1.0 # Lambda for joint losses + LAMBDA_GEN: 1.0 # Lambda for text-motion generation losses + LAMBDA_CROSS: 1.0 # Lambda for cross-reconstruction losses + LAMBDA_CYCLE: 1.0 # Lambda for cycle losses + LAMBDA_PRIOR: 0.0 + DIST_SYNC_ON_STEP: True +METRIC: + FORCE_IN_METER: True + DIST_SYNC_ON_STEP: True +DATASET: + NCLASSES: 10 + SAMPLER: + MAX_SQE: -1 + MAX_LEN: 196 + MIN_LEN: 40 + MAX_TEXT_LEN: 20 + HUMANML3D_272: + UNIT_LEN: 4 + + +LOGGER: + SACE_CHECKPOINT_EPOCH: 1 + LOG_EVERY_STEPS: 1 + VAL_EVERY_STEPS: 10 + TENSORBOARD: true + WANDB: + OFFLINE: false + PROJECT: null + RESUME_ID: null diff --git a/Evaluator_272/configs/configs_evaluator_272/H3D-TMR.yaml b/Evaluator_272/configs/configs_evaluator_272/H3D-TMR.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5b8eb8ea8cf99e582b117d6e1ffbabc73bc065d2 --- /dev/null +++ b/Evaluator_272/configs/configs_evaluator_272/H3D-TMR.yaml @@ -0,0 +1,95 @@ +NAME: EXP1 # Experiment name +DEBUG: False # Debug mode +ACCELERATOR: 'gpu' # Accelerator options: 'cpu', 'gpu', 'tpu', 'ipu', 'hpu', 'mps', 'auto' +DEVICE: [0] # Index of gpus e.g. [0] or [0,1,2,3] +# DEVICE: [0] # Index of gpus e.g. 
[0] or [0,1,2,3] + +# Training configuration +TRAIN: + #--------------------------------- + STAGE: temos # stage: "temos", "vae", "diffusion", or "vae_diffusion" + #--------------------------------- + DATASETS: ['humanml3d_272'] # Training datasets + NUM_WORKERS: 11 # Number of workers + BATCH_SIZE: 256 # Size of batches + START_EPOCH: 0 # Start epoch + END_EPOCH: 100 # End epoch + RESUME: '' # Resume training from this path + OPTIM: + TYPE: AdamW # Optimizer type + LR: 1e-4 # Learning rate + PRETRAINED_MLD: False + +# Evaluating Configuration +EVAL: + DATASETS: ['humanml3d_272'] # Evaluating datasets + BATCH_SIZE: 32 # Evaluating Batch size + SPLIT: test + eval_self_on_gt: True + +# Test Configuration +TEST: + PRETRAINED_CHECKPOINTS_VAE: '' + SAVE_PREDICTIONS: False + CHECKPOINTS: '' # Pretrained model path + DATASETS: ['humanml3d_272'] # Testing datasets + SPLIT: test + BATCH_SIZE: 32 # Testing batch size + MEAN: False + NUM_SAMPLES: 1 + FACT: 1 + inference_vq_code: False + # REPLICATION_TIM + +# Datasets Configuration +DATASET: + JOINT_TYPE: 'humanml3d_v3' # joint type + VERSION: '' + MOTION_TYPE: '' +METRIC: + TYPE: ['TMR_TM2TMetrics'] +# Losses Configuration +LOSS: + TYPE: temos # Losses type + USE_INFONCE: True + USE_INFONCE_FILTER: True + LAMBDA_LATENT: 1.0e-5 # Lambda for latent losses + LAMBDA_KL: 1.0e-5 # Lambda for kl losses + LAMBDA_REC: 1.0 # Lambda for reconstruction losses + LAMBDA_GEN: 1.0 # Lambda for text-motion generation losses + LAMBDA_CROSS: 1.0 # Lambda for cross-reconstruction losses + LAMBDA_CYCLE: 0.0 # Lambda for cycle losses + LAMBDA_PRIOR: 0.0 + LAMBDA_INFONCE: 0.1 # Lambda for infonce + INFONCE_TEMP: 0.1 + DIST_SYNC_ON_STEP: False # Sync losses on step during distributed training + USE_RECLIPLOSS: False + SYNC: False + TRAIN_TMR: False + +# Model Configuration +model: + vae: true # whether to use a VAE model + model_type: temos # model type + condition: 'text' + target: modules_temos + ##### + latent_dim: 256 # latent dimension + ff_size: 1024 # feed-forward size + num_layers: 4 # number of layers + num_head: 6 # number of attention heads + dropout: 0.1 # dropout rate + activation: gelu # activation type + eval_text_encode_way: given_glove + eval_text_source: token + +# Logger configuration +LOGGER: + SAVE_CHECKPOINT_EPOCH: 10 + LOG_EVERY_STEPS: 1 + VAL_EVERY_STEPS: 5 + TENSORBOARD: True + WANDB: + PROJECT: null + OFFLINE: False + RESUME_ID: null \ No newline at end of file diff --git a/Evaluator_272/configs/modules/denoiser.yaml b/Evaluator_272/configs/modules/denoiser.yaml new file mode 100644 index 0000000000000000000000000000000000000000..96964dd96c29fcdc38a9500d82c3cd87f8acfcd9 --- /dev/null +++ b/Evaluator_272/configs/modules/denoiser.yaml @@ -0,0 +1,22 @@ +denoiser: + target: mld.models.architectures.mld_denoiser.MldDenoiser + params: + text_encoded_dim: 768 + ff_size: 1024 + num_layers: 9 + num_heads: 4 + dropout: 0.1 + normalize_before: False + activation: 'gelu' + flip_sin_to_cos: True + return_intermediate_dec: False + position_embedding: 'learned' + arch: trans_enc + freq_shift: 0 + condition: ${model.condition} + latent_dim: ${model.latent_dim} + guidance_scale: ${model.guidance_scale} + guidance_uncondp: ${model.guidance_uncondp} + nfeats: ${DATASET.NFEATS} + nclasses: ${DATASET.NCLASSES} + ablation: ${TRAIN.ABLATION} diff --git a/Evaluator_272/configs/modules/evaluators.yaml b/Evaluator_272/configs/modules/evaluators.yaml new file mode 100644 index 0000000000000000000000000000000000000000..12145873742544d94cfab32660143d91a8739d42 --- /dev/null +++ 
b/Evaluator_272/configs/modules/evaluators.yaml @@ -0,0 +1,20 @@ +t2m_textencoder: + target: mld.models.architectures.t2m_textenc.TextEncoderBiGRUCo + params: + word_size: 300 + pos_size: 15 + hidden_size: 512 + output_size: 512 + +t2m_moveencoder: + target: mld.models.architectures.t2m_textenc.MovementConvEncoder + params: + hidden_size: 512 + output_size: 512 + +t2m_motionencoder: + target: mld.models.architectures.t2m_motionenc.MotionEncoder + params: + input_size: ${model.t2m_moveencoder.output_size} + hidden_size: 1024 + output_size: 512 diff --git a/Evaluator_272/configs/modules/motion_vae.yaml b/Evaluator_272/configs/modules/motion_vae.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3be33a3e9e5fb84deedba4026c012d4272be039b --- /dev/null +++ b/Evaluator_272/configs/modules/motion_vae.yaml @@ -0,0 +1,15 @@ +motion_vae: + # Optional: mld_vae, vposert_vae + target: mld.models.architectures.mld_vae.MldVae + params: + arch: 'encoder_decoder' + ff_size: 1024 + num_layers: 9 + num_heads: 4 + dropout: 0.1 + normalize_before: false + activation: 'gelu' + position_embedding: 'learned' + latent_dim: ${model.latent_dim} + nfeats: ${DATASET.NFEATS} + ablation: ${TRAIN.ABLATION} diff --git a/Evaluator_272/configs/modules/scheduler.yaml b/Evaluator_272/configs/modules/scheduler.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c248593217d0d8d5ee225fd8ca468b55bfd4d56d --- /dev/null +++ b/Evaluator_272/configs/modules/scheduler.yaml @@ -0,0 +1,25 @@ +scheduler: + target: diffusers.DDIMScheduler + num_inference_timesteps: 50 + eta: 0.0 + params: + num_train_timesteps: 1000 + beta_start: 0.00085 + beta_end: 0.012 + beta_schedule: 'scaled_linear' # Optional: ['linear', 'scaled_linear', 'squaredcos_cap_v2'] + # variance_type: 'fixed_small' + clip_sample: false # clip sample to -1~1 + # below are for ddim + set_alpha_to_one: false + steps_offset: 1 + + +noise_scheduler: + target: diffusers.DDPMScheduler + params: + num_train_timesteps: 1000 + beta_start: 0.00085 + beta_end: 0.012 + beta_schedule: 'scaled_linear' # Optional: ['linear', 'scaled_linear', 'squaredcos_cap_v2'] + variance_type: 'fixed_small' + clip_sample: false # clip sample to -1~1 diff --git a/Evaluator_272/configs/modules/text_encoder.yaml b/Evaluator_272/configs/modules/text_encoder.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0fb89f45c69a09250bc596229752c0ffc19dbb98 --- /dev/null +++ b/Evaluator_272/configs/modules/text_encoder.yaml @@ -0,0 +1,8 @@ +text_encoder: + # Optional: mld_clip, mld_bert + target: mld.models.architectures.mld_clip.MldTextEncoder + params: + finetune: false # if false, model weights are frozen + last_hidden_state: false # if true, the last hidden state is used as the text embedding + latent_dim: ${model.latent_dim} + modelpath: ${model.clip_path} diff --git a/Evaluator_272/configs/modules_temos/motiondecoder.yaml b/Evaluator_272/configs/modules_temos/motiondecoder.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cd701ae85044143773d37d3be23833d527634ff7 --- /dev/null +++ b/Evaluator_272/configs/modules_temos/motiondecoder.yaml @@ -0,0 +1,11 @@ +motiondecoder: + name: actor_decoder + target: mld.models.architectures.temos.motiondecoder.actor.ActorAgnosticDecoder + params: + latent_dim: ${model.latent_dim} + ff_size: ${model.ff_size} + num_layers: ${model.num_layers} + num_head: ${model.num_head} + droupout: ${model.dropout} + activation: ${model.activation} + nfeats: ${DATASET.NFEATS} \ No newline at end of file 
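The `${...}` values used throughout these module configs (e.g. `${model.latent_dim}`, `${DATASET.NFEATS}`, `${TRAIN.ABLATION}`) are OmegaConf interpolations: they are resolved lazily, on access, against the merged root config assembled by `get_module_config`/`parse_args` in `Evaluator_272/mld/config.py`. A minimal sketch of how such a reference resolves, assuming only that omegaconf is installed and using toy values rather than the repo's real files:

from omegaconf import OmegaConf

# Toy root config standing in for the merge of base.yaml and one module yaml.
cfg = OmegaConf.create({
    "model": {"latent_dim": 256, "dropout": 0.1},
    "motiondecoder": {"params": {
        "latent_dim": "${model.latent_dim}",  # interpolation, resolved on access
        "droupout": "${model.dropout}",       # key spelled as in the repo's yaml
    }},
})
print(cfg.motiondecoder.params.latent_dim)  # -> 256

Because references are looked up against the final merged tree rather than at load time, the order in which the per-module yaml files are merged does not matter.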
diff --git a/Evaluator_272/configs/modules_temos/motionencoder.yaml b/Evaluator_272/configs/modules_temos/motionencoder.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e696f00c00cdc4018843dec1489a5b68fd749819 --- /dev/null +++ b/Evaluator_272/configs/modules_temos/motionencoder.yaml @@ -0,0 +1,12 @@ +motionencoder: + name: actor_encoder + target: mld.models.architectures.temos.motionencoder.actor.ActorAgnosticEncoder + params: + latent_dim: ${model.latent_dim} + vae: ${model.vae} + ff_size: ${model.ff_size} + num_layers: ${model.num_layers} + num_head: ${model.num_head} + droupout: ${model.dropout} + activation: ${model.activation} + nfeats: ${DATASET.NFEATS} \ No newline at end of file diff --git a/Evaluator_272/configs/modules_temos/text_encoder.yaml b/Evaluator_272/configs/modules_temos/text_encoder.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c718b4c7a54571bde6fa64891a31f87b679909d6 --- /dev/null +++ b/Evaluator_272/configs/modules_temos/text_encoder.yaml @@ -0,0 +1,13 @@ +textencoder: + name: distilbert_actor + target: mld.models.architectures.temos.textencoder.distillbert_actor.DistilbertActorAgnosticEncoder + params: + latent_dim: ${model.latent_dim} + vae: ${model.vae} + ff_size: ${model.ff_size} + num_layers: ${model.num_layers} + num_head: ${model.num_head} + droupout: ${model.dropout} + activation: ${model.activation} + finetune: false + modelpath: ${model.bert_path} \ No newline at end of file diff --git a/Evaluator_272/datasets/__init__.py b/Evaluator_272/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Evaluator_272/mld/__init__.py b/Evaluator_272/mld/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Evaluator_272/mld/callback/__init__.py b/Evaluator_272/mld/callback/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e290a7d9ac46f036f793c88d7286cbc070d2057d --- /dev/null +++ b/Evaluator_272/mld/callback/__init__.py @@ -0,0 +1 @@ +from .progress import ProgressLogger diff --git a/Evaluator_272/mld/callback/progress.py b/Evaluator_272/mld/callback/progress.py new file mode 100644 index 0000000000000000000000000000000000000000..eca07fc20b6e2ec457ac46a7ac938c6f202ac51d --- /dev/null +++ b/Evaluator_272/mld/callback/progress.py @@ -0,0 +1,54 @@ +import logging + +from pytorch_lightning import LightningModule, Trainer +from pytorch_lightning.callbacks import Callback +import psutil + +logger = logging.getLogger() + + +class ProgressLogger(Callback): + + def __init__(self, metric_monitor: dict, precision: int = 3): + # Metric to monitor + self.metric_monitor = metric_monitor + self.precision = precision + + def on_train_start(self, trainer: Trainer, pl_module: LightningModule, + **kwargs) -> None: + logger.info("Training started") + + def on_train_end(self, trainer: Trainer, pl_module: LightningModule, + **kwargs) -> None: + logger.info("Training done") + + def on_validation_epoch_end(self, trainer: Trainer, + pl_module: LightningModule, **kwargs) -> None: + if trainer.sanity_checking: + logger.info("Sanity checking ok.") + + def on_train_epoch_end(self, + trainer: Trainer, + pl_module: LightningModule, + padding=False, + **kwargs) -> None: + metric_format = f"{{:.{self.precision}e}}" + line = f"Epoch {trainer.current_epoch}" + if padding: + line = f"{line:>{len('Epoch xxxx')}}" # Right padding + metrics_str = [] + + 
losses_dict = trainer.callback_metrics + for metric_name, dico_name in self.metric_monitor.items(): + if dico_name in losses_dict: + metric = losses_dict[dico_name].item() + metric = metric_format.format(metric) + metric = f"{metric_name} {metric}" + metrics_str.append(metric) + + if len(metrics_str) == 0: + return + + memory = f"Memory {psutil.virtual_memory().percent}%" + line = line + ": " + " ".join(metrics_str) + " " + memory + logger.info(line) diff --git a/Evaluator_272/mld/config.py b/Evaluator_272/mld/config.py new file mode 100644 index 0000000000000000000000000000000000000000..05bac9aa6784420357e67b6f37c2383ca6533724 --- /dev/null +++ b/Evaluator_272/mld/config.py @@ -0,0 +1,104 @@ +import importlib +from argparse import ArgumentParser +from omegaconf import OmegaConf +import os + + +def get_module_config(cfg_model, path="modules"): + module_conf = OmegaConf.create() + files = os.listdir(f'./configs/{path}/') + for file in files: + if file.endswith('.yaml'): + with open(f'./configs/{path}/' + file, 'r') as f: + module_conf.merge_with(OmegaConf.load(f)) + module_conf.merge_with(cfg_model) + return module_conf + + +def get_obj_from_str(string, reload=False): + module, cls = string.rsplit(".", 1) + if reload: + module_imp = importlib.import_module(module) + importlib.reload(module_imp) + return getattr(importlib.import_module(module, package=None), cls) + + +def instantiate_from_config(config): + if "target" not in config: + if config == '__is_first_stage__': + return None + elif config == "__is_unconditional__": + return None + raise KeyError("Expected key `target` to instantiate.") + return get_obj_from_str(config["target"])(**config.get("params", dict())) + + +def parse_args(phase="train"): + parser = ArgumentParser() + + group = parser.add_argument_group("Training options") + if phase in ["train", "test"]: + group.add_argument( + "--cfg", + type=str, + required=False, + default="./configs/config.yaml", + help="config file", + ) + group.add_argument( + "--cfg_assets", + type=str, + required=False, + default="./configs/assets.yaml", + help="config file for asset paths", + ) + group.add_argument("--batch_size", + type=int, + required=False, + help="training batch size") + group.add_argument("--device", + type=int, + nargs="+", + required=False, + help="training device") + group.add_argument("--nodebug", + action="store_true", + required=False, + help="debug or not") + group.add_argument("--dir", + type=str, + required=False, + help="evaluate existing npys") + + # remove None params and create a dictionary + params = parser.parse_args() + # params = {key: val for key, val in vars(opt).items() if val is not None} + + # update config from files + cfg_base = OmegaConf.load('./configs/base.yaml') + cfg_exp = OmegaConf.merge(cfg_base, OmegaConf.load(params.cfg)) + cfg_model = get_module_config(cfg_exp.model, cfg_exp.model.target) + cfg_exp.model = cfg_model + cfg_assets = OmegaConf.load(params.cfg_assets) + cfg = OmegaConf.merge(cfg_exp, cfg_model, cfg_assets) + + if phase in ["train", "test"]: + cfg.TRAIN.BATCH_SIZE = (params.batch_size + if params.batch_size else cfg.TRAIN.BATCH_SIZE) + cfg.DEVICE = params.device if params.device else cfg.DEVICE + cfg.DEBUG = not params.nodebug if params.nodebug is not None else cfg.DEBUG + + cfg.DEBUG = False if phase == "test" else cfg.DEBUG + if phase == "test": + cfg.DEBUG = False + cfg.DEVICE = [0] + print("Force no debugging and one gpu when testing") + cfg.TEST.TEST_DIR = params.dir if params.dir else cfg.TEST.TEST_DIR + + # debug mode + if 
cfg.DEBUG: + cfg.NAME = "debug--" + cfg.NAME + cfg.LOGGER.WANDB.OFFLINE = True + cfg.LOGGER.VAL_EVERY_STEPS = 1 + + return cfg diff --git a/Evaluator_272/mld/data/HumanML3D_272.py b/Evaluator_272/mld/data/HumanML3D_272.py new file mode 100644 index 0000000000000000000000000000000000000000..4cd5c21f18acc0418851f7e0205fdc6964942118 --- /dev/null +++ b/Evaluator_272/mld/data/HumanML3D_272.py @@ -0,0 +1,131 @@ +import numpy as np +import torch + +from mld.data.humanml.scripts.motion_process import (process_file, + recover_from_ric, recover_from_root_rot6d) + +from .base import BASEDataModule +from .humanml.data.dataset import Text2MotionDatasetV2 +from .humanml.common.skeleton import Skeleton +import torch.nn.functional as F + + +class HumanML3D_272_DataModule(BASEDataModule): + + def __init__(self, + cfg, + batch_size, + num_workers, + collate_fn=None, + phase="train", + **kwargs): + super().__init__(batch_size=batch_size, + num_workers=num_workers, + collate_fn=collate_fn) + + self.save_hyperparameters(logger=False) + self.name = "humanml3d_272" + self.njoints = 22 + self.hparams['njoints']=22 + if phase == "text_only": + self.Dataset = TextOnlyDataset + else: + if cfg.TRAIN.STAGE in ['gpt'] and (not cfg.TEST.inference_vq_code): + if cfg.model.vae_type in ['humanvq']: + self.Dataset = Text2MotionDatasetV2_VQToken + elif cfg.model.vae_type in ['hvq']: + self.Dataset = Text2MotionDatasetV2_Dual_codebook_VQToken + else: + raise NotImplementedError + elif cfg.TEST.inference_vq_code: + self.Dataset = VQMotionDataset + else: + self.Dataset = Text2MotionDatasetV2 + self.cfg = cfg + sample_overrides = { + "split": "val", + "tiny": True, + "progress_bar": False + } + + self._sample_set = self.get_sample_set(overrides=sample_overrides) + + self.nfeats = self._sample_set.nfeats + + def recover_from_local_position(self, final_x, njoint): + + def accumulate_rotations(relative_rotations): + R_total = [relative_rotations[0]] + for R_rel in relative_rotations[1:]: + R_total.append(np.matmul(R_rel, R_total[-1])) + + return np.array(R_total) + + def rotation_6d_to_matrix(d6: torch.Tensor) -> torch.Tensor: + a1, a2 = d6[..., :3], d6[..., 3:] + b1 = F.normalize(a1, dim=-1) + b2 = a2 - (b1 * a2).sum(-1, keepdim=True) * b1 + b2 = F.normalize(b2, dim=-1) + b3 = torch.cross(b1, b2, dim=-1) + return torch.stack((b1, b2, b3), dim=-2) + + nfrm, _ = final_x.shape + positions_no_heading = final_x[:,8:8+3*njoint].reshape(nfrm, -1, 3) + velocities_root_xy_no_heading = final_x[:,:2] + global_heading_diff_rot = final_x[:,2:8] + + global_heading_rot = accumulate_rotations(rotation_6d_to_matrix(torch.from_numpy(global_heading_diff_rot)).numpy()) + inv_global_heading_rot = np.transpose(global_heading_rot, (0, 2, 1)) + positions_with_heading = np.matmul(np.repeat(inv_global_heading_rot[:, None,:, :], njoint, axis=1), positions_no_heading[...,None]).squeeze(-1) + velocities_root_xyz_no_heading = np.zeros((velocities_root_xy_no_heading.shape[0], 3)) + velocities_root_xyz_no_heading[:, 0] = velocities_root_xy_no_heading[:, 0] + velocities_root_xyz_no_heading[:, 2] = velocities_root_xy_no_heading[:, 1] + velocities_root_xyz_no_heading[1:, :] = np.matmul(inv_global_heading_rot[:-1], velocities_root_xyz_no_heading[1:, :,None]).squeeze(-1) + + root_translation = np.cumsum(velocities_root_xyz_no_heading, axis=0) + positions_with_heading[:, :, 0] += root_translation[:, 0:1] + positions_with_heading[:, :, 2] += root_translation[:, 2:] + + return positions_with_heading + + def feats2joints(self, features, skel=None, motion_type=''): + 
assert motion_type in [''] + assert features.shape[2] == 272 + mean = torch.tensor(self.hparams.mean).to(features) + std = torch.tensor(self.hparams.std).to(features) + features = features * std + mean + return self.recover_from_local_position(features.reshape(-1, 272).detach().cpu().numpy(), self.njoints).reshape(features.shape[0], -1, 22, 3) + + + def joints2feats(self, features): + features = process_file(features, self.njoints)[0] + return features + + def renorm4t2m(self, features): + ori_mean = torch.tensor(self.hparams.mean).to(features) + ori_std = torch.tensor(self.hparams.std).to(features) + eval_mean = torch.tensor(self.hparams.mean_eval).to(features) + eval_std = torch.tensor(self.hparams.std_eval).to(features) + features = features * ori_std + ori_mean + features = (features - eval_mean) / eval_std + return features + + def renorm2ori(self, features): + mean = torch.tensor(self.hparams.mean).to(features) + std = torch.tensor(self.hparams.std).to(features) + features = features * std + mean + + return features + + + def mm_mode(self, mm_on=True): + if mm_on: + self.is_mm = True + self.name_list = self.test_dataset.name_list + self.mm_list = np.random.choice(self.name_list, + self.cfg.TEST.MM_NUM_SAMPLES, + replace=False) + self.test_dataset.name_list = self.mm_list + else: + self.is_mm = False + self.test_dataset.name_list = self.name_list diff --git a/Evaluator_272/mld/data/__init__.py b/Evaluator_272/mld/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Evaluator_272/mld/data/base.py b/Evaluator_272/mld/data/base.py new file mode 100644 index 0000000000000000000000000000000000000000..013fe81e4260a14497dc504435db9b2e7ea14f94 --- /dev/null +++ b/Evaluator_272/mld/data/base.py @@ -0,0 +1,105 @@ +from os.path import join as pjoin +import numpy as np +import pytorch_lightning as pl +from torch.utils.data import DataLoader + + +class BASEDataModule(pl.LightningDataModule): + + def __init__(self, collate_fn, batch_size: int, num_workers: int): + super().__init__() + + self.dataloader_options = { + "batch_size": batch_size, + "num_workers": num_workers, + "collate_fn": collate_fn, + } + + self.persistent_workers = True + self.is_mm = False + + def get_sample_set(self, overrides={}): + sample_params = self.hparams.copy() + sample_params.update(overrides) + split_file = pjoin( + eval(f"self.cfg.DATASET.{self.name.upper()}.SPLIT_ROOT"), self.cfg.DATASET.VERSION, + self.cfg.EVAL.SPLIT + ".txt", + ) + return self.Dataset(split_file=split_file, **sample_params) + + def __getattr__(self, item): + # train_dataset/val_dataset etc cached like properties + if item.endswith("_dataset") and not item.startswith("_"): + subset = item[:-len("_dataset")] + item_c = "_" + item + if item_c not in self.__dict__: + # todo: config name not consistent + subset = subset.upper() if subset != "val" else "EVAL" + split = eval(f"self.cfg.{subset}.SPLIT") + split_file = pjoin( + eval(f"self.cfg.DATASET.{self.name.upper()}.SPLIT_ROOT"), + self.cfg.DATASET.VERSION, + eval(f"self.cfg.{subset}.SPLIT") + ".txt", + ) + self.__dict__[item_c] = self.Dataset(split_file=split_file, + split=split, + **self.hparams) + return getattr(self, item_c) + classname = self.__class__.__name__ + raise AttributeError(f"'{classname}' object has no attribute '{item}'") + + def setup(self, stage=None): + self.stage = stage + # Use the getter the first time to load the data + if stage in (None, "fit"): + _ = self.train_dataset + _ = self.val_dataset 
+ if stage in (None, "test"): + _ = self.test_dataset + + def train_dataloader(self): + return DataLoader( + self.train_dataset, + shuffle=True, + persistent_workers=True, + **self.dataloader_options, + ) + + def predict_dataloader(self): + dataloader_options = self.dataloader_options.copy() + dataloader_options[ + "batch_size"] = 1 if self.is_mm else self.cfg.TEST.BATCH_SIZE + dataloader_options["num_workers"] = self.cfg.TEST.NUM_WORKERS + dataloader_options["shuffle"] = False + return DataLoader( + self.test_dataset, + persistent_workers=True, + **dataloader_options, + ) + + def val_dataloader(self): + # overrides batch_size and num_workers + dataloader_options = self.dataloader_options.copy() + dataloader_options["batch_size"] = self.cfg.EVAL.BATCH_SIZE + dataloader_options["num_workers"] = self.cfg.EVAL.NUM_WORKERS + dataloader_options["shuffle"] = False + + return DataLoader( + self.val_dataset, + persistent_workers=True, + **dataloader_options, + ) + + def test_dataloader(self): + # overrides batch_size and num_workers + dataloader_options = self.dataloader_options.copy() + dataloader_options[ + "batch_size"] = 1 if self.is_mm else self.cfg.TEST.BATCH_SIZE + dataloader_options["num_workers"] = self.cfg.TEST.NUM_WORKERS + # dataloader_options["drop_last"] = True + dataloader_options["shuffle"] = False + return DataLoader( + self.test_dataset, + persistent_workers=True, + **dataloader_options, + ) diff --git a/Evaluator_272/mld/data/get_data.py b/Evaluator_272/mld/data/get_data.py new file mode 100644 index 0000000000000000000000000000000000000000..5e64ede3c171dacae32182ae8f92ec78bfce7dc4 --- /dev/null +++ b/Evaluator_272/mld/data/get_data.py @@ -0,0 +1,183 @@ +from os.path import join as pjoin +import numpy as np +# from .humanml.utils.word_vectorizer import WordVectorizer, WordVectorizer_only_text_token +from .utils import * +from .HumanML3D_272 import HumanML3D_272_DataModule + + +def get_mean_std(phase, cfg, dataset_name): + assert dataset_name == 'humanml3d_272' + + data_root = eval(f"cfg.DATASET.{dataset_name.upper()}.ROOT") + mean = np.load(pjoin(data_root, 'mean_std', cfg.DATASET.VERSION, cfg.DATASET.MOTION_TYPE, "Mean.npy")) + std = np.load(pjoin(data_root, 'mean_std', cfg.DATASET.VERSION, cfg.DATASET.MOTION_TYPE, "Std.npy")) + return mean, std + + + +def get_njoints(dataset_name): + njoints = 22 + return njoints + + +def reget_mean_std(cfg, dataset_name, mean, std): + if 'MINOR_MOTION_TYPE' in cfg.DATASET: + select_motion_type = cfg.DATASET.MINOR_MOTION_TYPE + else: + select_motion_type = cfg.DATASET.MOTION_TYPE + + njoints = get_njoints(dataset_name) + if select_motion_type == 'root_position': + mean = mean[..., :4+(njoints - 1) * 3] + elif select_motion_type == 'root_position_vel': + mean = np.concatenate((mean[..., :4+(njoints - 1) * 3], mean[..., 4+(njoints - 1) * 9: 4+(njoints - 1) * 9 + njoints*3]), axis=0) + elif select_motion_type == 'root_position_rot6d': + mean = np.concatenate((mean[..., :4+(njoints - 1) * 3], mean[..., 4+(njoints - 1) * 3: 4+(njoints - 1) * 9]), axis=0) + elif select_motion_type == 'root_rot6d': + mean = np.concatenate((mean[..., :4], mean[..., 4+(njoints - 1) * 3: 4+(njoints - 1) * 9]), axis=0) + elif select_motion_type in ['all', 'smplx_212', 'vector_263', 'vector_263_ori_humanml', 'smplx_159', '']: + pass + elif select_motion_type == 'root_body_pos_vel_hand_all': + mean = np.concatenate((mean[..., :4+(njoints - 1) * 3], mean[..., 4+(njoints - 1) * 3 + 21 * 6 : 4+(njoints - 1) * 9], mean[..., 4+(njoints - 1) * 9: 4+(njoints - 1) * 9 + 
njoints*3]), axis=0) + # pass + elif select_motion_type == 'root_body_pos_vel_hand_pos_vel': + mean = np.concatenate((mean[..., :4+(njoints - 1) * 3], mean[..., 4+(njoints - 1) * 9: 4+(njoints - 1) * 9 + njoints*3]), axis=0) + elif select_motion_type == 'root_body_pos_vel_hand_pos': + mean = np.concatenate((mean[..., :4+(njoints - 1) * 3], mean[..., 4+(njoints - 1) * 9 + 22 * 3: 4+(njoints - 1) * 9 + 52*3]), axis=0) + elif select_motion_type == 'root_body_pos_vel_hand_rot': + mean = np.concatenate((mean[..., :4+(22 - 1) * 3], mean[..., 4+(52 - 1) * 3 + (22-1)*6 : 4+(52-1)*9], mean[..., 4+(52 - 1) * 9: 4+(52 - 1) * 9 + 22*3]), axis=0) + elif select_motion_type == 'root_position_vel_only_body': + mean = np.concatenate((mean[..., :4+(22 - 1) * 3], mean[..., 4+(52 - 1) * 9: 4+(52 - 1) * 9 + 22*3]), axis=0) + elif select_motion_type == 'root_body_pos_vel_hand_pos_vel_hand_wrist': + body_pos_mean = mean[..., :4+(22 - 1) * 3] # 67 + left_hand_pos_mean = (mean[..., 4+(22 - 1) * 3:4+(37 - 1) * 3].reshape(15, 3) - body_pos_mean[..., -6:-3]).reshape(-1) # 45 + right_hand_pos_mean = (mean[..., 4+(37 - 1) * 3:4+(52 - 1) * 3].reshape(15, 3) - body_pos_mean[..., -3:]).reshape(-1) # 45 + + body_vel_mean = mean[..., 4+(52 - 1) * 9: 4+(52 - 1) * 9 + 22*3] # 66 + left_hand_vel_mean = (mean[..., 4+(52 - 1) * 9 + 22*3: 4+(52 - 1) * 9 + 22*3 + 15 * 3].reshape(15, 3) - body_vel_mean[..., -6:-3]).reshape(-1) + right_hand_vel_mean = (mean[..., 4+(52 - 1) * 9 + 22*3+ 15 * 3: 4+(52 - 1) * 9 + 22*3 + 15 * 3 + 15 * 3].reshape(15, 3) - body_vel_mean[..., -3:]).reshape(-1) + + mean = np.concatenate((body_pos_mean, left_hand_pos_mean, right_hand_pos_mean, body_vel_mean, left_hand_vel_mean, right_hand_vel_mean), axis=0) + else: + raise NotImplementedError + + if select_motion_type == 'root_position': + std = std[..., :4+(njoints-1)*3] + elif select_motion_type == 'root_position_vel': + std = np.concatenate((std[..., :4+(njoints - 1) * 3], std[..., 4+(njoints - 1) * 9: 4+(njoints - 1) * 9 + njoints*3]), axis=0) + elif select_motion_type == 'root_position_rot6d': + std = np.concatenate((std[..., :4+(njoints - 1) * 3], std[..., 4+(njoints - 1) * 3: 4+(njoints - 1) * 9]), axis=0) + elif select_motion_type == 'root_rot6d': + std = np.concatenate((std[..., :4], std[..., 4+(njoints - 1) * 3: 4+(njoints - 1) * 9]), axis=0) + elif select_motion_type in ['all', 'smplx_212', 'vector_263', 'vector_263_ori_humanml', 'smplx_159', '']: + pass + elif select_motion_type == 'root_body_pos_vel_hand_all': + std = np.concatenate((std[..., :4+(njoints - 1) * 3], std[..., 4+(njoints - 1) * 3 + 21 * 6 : 4+(njoints - 1) * 9], std[..., 4+(njoints - 1) * 9: 4+(njoints - 1) * 9 + njoints*3]), axis=0) + # pass + elif select_motion_type == 'root_body_pos_vel_hand_pos_vel': + std = np.concatenate((std[..., :4+(njoints - 1) * 3], std[..., 4+(njoints - 1) * 9: 4+(njoints - 1) * 9 + njoints*3]), axis=0) + elif select_motion_type == 'root_body_pos_vel_hand_pos': + std = np.concatenate((std[..., :4+(njoints - 1) * 3], std[..., 4+(njoints - 1) * 9 + 22 * 3: 4+(njoints - 1) * 9 + 52*3]), axis=0) + elif select_motion_type == 'root_body_pos_vel_hand_rot': + std = np.concatenate((std[..., :4+(22 - 1) * 3], std[..., 4+(52 - 1) * 3 + (22-1)*6 : 4+(52-1)*9], std[..., 4+(52 - 1) * 9: 4+(52 - 1) * 9 + 22*3]), axis=0) + elif select_motion_type == 'root_position_vel_only_body': + std = np.concatenate((std[..., :4+(22 - 1) * 3], std[..., 4+(52 - 1) * 9: 4+(52 - 1) * 9 + 22*3]), axis=0) + elif select_motion_type == 'root_body_pos_vel_hand_pos_vel_hand_wrist': + std = 
np.concatenate((std[..., :4+(njoints - 1) * 3], std[..., 4+(njoints - 1) * 9: 4+(njoints - 1) * 9 + njoints*3]), axis=0) + else: + raise NotImplementedError + + return mean, std + +# def get_WordVectorizer(cfg, phase, dataset_name): +# if phase not in ["text_only"]: +# if dataset_name.lower() in ['humanml3d_272']: +# if cfg.model.eval_text_source == 'token': +# return WordVectorizer(cfg.DATASET.WORD_VERTILIZER_PATH, "our_vab", cfg.model.eval_text_encode_way) +# else: +# return WordVectorizer_only_text_token(cfg.DATASET.WORD_VERTILIZER_PATH, "our_vab", cfg.model.eval_text_encode_way) +# else: +# raise ValueError("Only support WordVectorizer for HumanML3D_272") +# else: +# return None + + +def get_collate_fn(name, cfg, phase="train"): + if name.lower() in ['humanml3d_272']: + if cfg.model.condition in ['text_all', 'text_face', 'text_body', 'text_hand', 'text_face_body', 'text_seperate', 'only_pose_concat', 'only_pose_fusion'] and (not cfg.TEST.inference_vq_code): + return mld_collate_text_all + elif cfg.TEST.inference_vq_code: + return vq_collate + elif cfg.TRAIN.STAGE in ['gpt'] and (not cfg.TEST.inference_vq_code): + return mld_collate_vq_token + else: + return mld_collate + else: + raise NotImplementedError + + +# map config name to module&path +dataset_module_map = { + 'humanml3d_272': HumanML3D_272_DataModule +} +motion_subdir = {'humanml3d_272': 'motion_data'} + + +def get_datasets(cfg, logger=None, phase="train"): + # get dataset names from cfg + dataset_names = eval(f"cfg.{phase.upper()}.DATASETS") + datasets = [] + for dataset_name in dataset_names: + if dataset_name.lower() in ["humanml3d_272"]: + + if 'MINOR_MOTION_TYPE' in cfg.DATASET: + input_format = cfg.DATASET.MINOR_MOTION_TYPE + else: + input_format = cfg.DATASET.MOTION_TYPE + + data_root = eval(f"cfg.DATASET.{dataset_name.upper()}.ROOT") + # get mean and std corresponding to dataset + mean, std = get_mean_std(phase, cfg, dataset_name) + mean_eval, std_eval = get_mean_std("val", cfg, dataset_name) + + mean, std = reget_mean_std(cfg, dataset_name, mean, std) + mean_eval, std_eval = reget_mean_std(cfg, dataset_name, mean_eval, std_eval) + + # get WordVectorizer + # wordVectorizer = get_WordVectorizer(cfg, phase, dataset_name) + # get collate_fn + collate_fn = get_collate_fn(dataset_name, cfg, phase) + # get dataset module + + dataset = dataset_module_map[dataset_name.lower()]( + cfg=cfg, + batch_size=cfg.TRAIN.BATCH_SIZE, + num_workers=cfg.TRAIN.NUM_WORKERS, + debug=cfg.DEBUG, + collate_fn=collate_fn, + mean=mean, + std=std, + mean_eval=mean_eval, + std_eval=std_eval, + # w_vectorizer=wordVectorizer, + input_format=cfg.DATASET.MOTION_TYPE, + text_dir=pjoin(data_root, "texts"), + motion_dir=pjoin(data_root, motion_subdir[dataset_name]), + max_motion_length=cfg.DATASET.SAMPLER.MAX_LEN, + min_motion_length=cfg.DATASET.SAMPLER.MIN_LEN, + max_text_len=cfg.DATASET.SAMPLER.MAX_TEXT_LEN, + unit_length=eval( + f"cfg.DATASET.{dataset_name.upper()}.UNIT_LEN"), + ) + datasets.append(dataset) + + else: + raise NotImplementedError + + if input_format == 'root_body_pos_vel_hand_pos_vel': + cfg.DATASET.NFEATS = 313 + else: + cfg.DATASET.NFEATS = datasets[0].nfeats + + cfg.DATASET.NJOINTS = datasets[0].njoints + return datasets diff --git a/Evaluator_272/mld/data/humanml/__init__.py b/Evaluator_272/mld/data/humanml/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
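The slice offsets used throughout reget_mean_std above follow the 263-dimensional HumanML3D feature layout that the 'vector_263' option refers to: 4 root channels, then (njoints-1)*3 local joint positions, (njoints-1)*6 6D joint rotations, njoints*3 joint velocities, and 4 foot-contact labels. A small sanity check of that offset arithmetic for njoints = 22 (an illustrative sketch, not repo code):

njoints = 22
root_end = 4                             # root rot-vel, linear xz-vel, root height
pos_end = root_end + (njoints - 1) * 3   # 67:  + local joint positions
rot_end = root_end + (njoints - 1) * 9   # 193: + 6D rotations (3 pos + 6 rot = 9 per joint)
vel_end = rot_end + njoints * 3          # 259: + joint velocities
total = vel_end + 4                      # 263: + foot-contact labels
print(pos_end, rot_end, vel_end, total)  # 67 193 259 263

diff --git a/Evaluator_272/mld/data/humanml/common/quaternion.py 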
b/Evaluator_272/mld/data/humanml/common/quaternion.py new file mode 100644 index 0000000000000000000000000000000000000000..dca3d890080a4e91e3f275f442b0aed006562881 --- /dev/null +++ b/Evaluator_272/mld/data/humanml/common/quaternion.py @@ -0,0 +1,423 @@ +# Copyright (c) 2018-present, Facebook, Inc. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# + +import torch +import numpy as np + +_EPS4 = np.finfo(float).eps * 4.0 + +_FLOAT_EPS = np.finfo(np.float64).eps + +# PyTorch-backed implementations +def qinv(q): + assert q.shape[-1] == 4, 'q must be a tensor of shape (*, 4)' + mask = torch.ones_like(q) + mask[..., 1:] = -mask[..., 1:] + return q * mask + + +def qinv_np(q): + assert q.shape[-1] == 4, 'q must be a tensor of shape (*, 4)' + return qinv(torch.from_numpy(q).float()).numpy() + + +def qnormalize(q): + assert q.shape[-1] == 4, 'q must be a tensor of shape (*, 4)' + return q / torch.norm(q, dim=-1, keepdim=True) + + +def qmul(q, r): + """ + Multiply quaternion(s) q with quaternion(s) r. + Expects two equally-sized tensors of shape (*, 4), where * denotes any number of dimensions. + Returns q*r as a tensor of shape (*, 4). + """ + assert q.shape[-1] == 4 + assert r.shape[-1] == 4 + + original_shape = q.shape + + # Compute outer product + terms = torch.bmm(r.view(-1, 4, 1), q.view(-1, 1, 4)) + + w = terms[:, 0, 0] - terms[:, 1, 1] - terms[:, 2, 2] - terms[:, 3, 3] + x = terms[:, 0, 1] + terms[:, 1, 0] - terms[:, 2, 3] + terms[:, 3, 2] + y = terms[:, 0, 2] + terms[:, 1, 3] + terms[:, 2, 0] - terms[:, 3, 1] + z = terms[:, 0, 3] - terms[:, 1, 2] + terms[:, 2, 1] + terms[:, 3, 0] + return torch.stack((w, x, y, z), dim=1).view(original_shape) + + +def qrot(q, v): + """ + Rotate vector(s) v about the rotation described by quaternion(s) q. + Expects a tensor of shape (*, 4) for q and a tensor of shape (*, 3) for v, + where * denotes any number of dimensions. + Returns a tensor of shape (*, 3). + """ + assert q.shape[-1] == 4 + assert v.shape[-1] == 3 + assert q.shape[:-1] == v.shape[:-1] + + original_shape = list(v.shape) + # print(q.shape) + q = q.contiguous().view(-1, 4) + v = v.contiguous().view(-1, 3) + + qvec = q[:, 1:] + uv = torch.cross(qvec, v, dim=1) + uuv = torch.cross(qvec, uv, dim=1) + return (v + 2 * (q[:, :1] * uv + uuv)).view(original_shape) + + +def qeuler(q, order, epsilon=0, deg=True): + """ + Convert quaternion(s) q to Euler angles. + Expects a tensor of shape (*, 4), where * denotes any number of dimensions. + Returns a tensor of shape (*, 3). 
+ """ + assert q.shape[-1] == 4 + + original_shape = list(q.shape) + original_shape[-1] = 3 + q = q.view(-1, 4) + + q0 = q[:, 0] + q1 = q[:, 1] + q2 = q[:, 2] + q3 = q[:, 3] + + if order == 'xyz': + x = torch.atan2(2 * (q0 * q1 - q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2)) + y = torch.asin(torch.clamp(2 * (q1 * q3 + q0 * q2), -1 + epsilon, 1 - epsilon)) + z = torch.atan2(2 * (q0 * q3 - q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3)) + elif order == 'yzx': + x = torch.atan2(2 * (q0 * q1 - q2 * q3), 1 - 2 * (q1 * q1 + q3 * q3)) + y = torch.atan2(2 * (q0 * q2 - q1 * q3), 1 - 2 * (q2 * q2 + q3 * q3)) + z = torch.asin(torch.clamp(2 * (q1 * q2 + q0 * q3), -1 + epsilon, 1 - epsilon)) + elif order == 'zxy': + x = torch.asin(torch.clamp(2 * (q0 * q1 + q2 * q3), -1 + epsilon, 1 - epsilon)) + y = torch.atan2(2 * (q0 * q2 - q1 * q3), 1 - 2 * (q1 * q1 + q2 * q2)) + z = torch.atan2(2 * (q0 * q3 - q1 * q2), 1 - 2 * (q1 * q1 + q3 * q3)) + elif order == 'xzy': + x = torch.atan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q3 * q3)) + y = torch.atan2(2 * (q0 * q2 + q1 * q3), 1 - 2 * (q2 * q2 + q3 * q3)) + z = torch.asin(torch.clamp(2 * (q0 * q3 - q1 * q2), -1 + epsilon, 1 - epsilon)) + elif order == 'yxz': + x = torch.asin(torch.clamp(2 * (q0 * q1 - q2 * q3), -1 + epsilon, 1 - epsilon)) + y = torch.atan2(2 * (q1 * q3 + q0 * q2), 1 - 2 * (q1 * q1 + q2 * q2)) + z = torch.atan2(2 * (q1 * q2 + q0 * q3), 1 - 2 * (q1 * q1 + q3 * q3)) + elif order == 'zyx': + x = torch.atan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2)) + y = torch.asin(torch.clamp(2 * (q0 * q2 - q1 * q3), -1 + epsilon, 1 - epsilon)) + z = torch.atan2(2 * (q0 * q3 + q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3)) + else: + raise + + if deg: + return torch.stack((x, y, z), dim=1).view(original_shape) * 180 / np.pi + else: + return torch.stack((x, y, z), dim=1).view(original_shape) + + +# Numpy-backed implementations + +def qmul_np(q, r): + q = torch.from_numpy(q).contiguous().float() + r = torch.from_numpy(r).contiguous().float() + return qmul(q, r).numpy() + + +def qrot_np(q, v): + q = torch.from_numpy(q).contiguous().float() + v = torch.from_numpy(v).contiguous().float() + return qrot(q, v).numpy() + + +def qeuler_np(q, order, epsilon=0, use_gpu=False): + if use_gpu: + q = torch.from_numpy(q).cuda().float() + return qeuler(q, order, epsilon).cpu().numpy() + else: + q = torch.from_numpy(q).contiguous().float() + return qeuler(q, order, epsilon).numpy() + + +def qfix(q): + """ + Enforce quaternion continuity across the time dimension by selecting + the representation (q or -q) with minimal distance (or, equivalently, maximal dot product) + between two consecutive frames. + + Expects a tensor of shape (L, J, 4), where L is the sequence length and J is the number of joints. + Returns a tensor of the same shape. + """ + assert len(q.shape) == 3 + assert q.shape[-1] == 4 + + result = q.copy() + dot_products = np.sum(q[1:] * q[:-1], axis=2) + mask = dot_products < 0 + mask = (np.cumsum(mask, axis=0) % 2).astype(bool) + result[1:][mask] *= -1 + return result + + +def euler2quat(e, order, deg=True): + """ + Convert Euler angles to quaternions. + """ + assert e.shape[-1] == 3 + + original_shape = list(e.shape) + original_shape[-1] = 4 + + e = e.view(-1, 3) + + ## if euler angles in degrees + if deg: + e = e * np.pi / 180. 
+ + x = e[:, 0] + y = e[:, 1] + z = e[:, 2] + + rx = torch.stack((torch.cos(x / 2), torch.sin(x / 2), torch.zeros_like(x), torch.zeros_like(x)), dim=1) + ry = torch.stack((torch.cos(y / 2), torch.zeros_like(y), torch.sin(y / 2), torch.zeros_like(y)), dim=1) + rz = torch.stack((torch.cos(z / 2), torch.zeros_like(z), torch.zeros_like(z), torch.sin(z / 2)), dim=1) + + result = None + for coord in order: + if coord == 'x': + r = rx + elif coord == 'y': + r = ry + elif coord == 'z': + r = rz + else: + raise + if result is None: + result = r + else: + result = qmul(result, r) + + # Reverse antipodal representation to have a non-negative "w" + if order in ['xyz', 'yzx', 'zxy']: + result *= -1 + + return result.view(original_shape) + + +def expmap_to_quaternion(e): + """ + Convert axis-angle rotations (aka exponential maps) to quaternions. + Stable formula from "Practical Parameterization of Rotations Using the Exponential Map". + Expects a tensor of shape (*, 3), where * denotes any number of dimensions. + Returns a tensor of shape (*, 4). + """ + assert e.shape[-1] == 3 + + original_shape = list(e.shape) + original_shape[-1] = 4 + e = e.reshape(-1, 3) + + theta = np.linalg.norm(e, axis=1).reshape(-1, 1) + w = np.cos(0.5 * theta).reshape(-1, 1) + xyz = 0.5 * np.sinc(0.5 * theta / np.pi) * e + return np.concatenate((w, xyz), axis=1).reshape(original_shape) + + +def euler_to_quaternion(e, order): + """ + Convert Euler angles to quaternions. + """ + assert e.shape[-1] == 3 + + original_shape = list(e.shape) + original_shape[-1] = 4 + + e = e.reshape(-1, 3) + + x = e[:, 0] + y = e[:, 1] + z = e[:, 2] + + rx = np.stack((np.cos(x / 2), np.sin(x / 2), np.zeros_like(x), np.zeros_like(x)), axis=1) + ry = np.stack((np.cos(y / 2), np.zeros_like(y), np.sin(y / 2), np.zeros_like(y)), axis=1) + rz = np.stack((np.cos(z / 2), np.zeros_like(z), np.zeros_like(z), np.sin(z / 2)), axis=1) + + result = None + for coord in order: + if coord == 'x': + r = rx + elif coord == 'y': + r = ry + elif coord == 'z': + r = rz + else: + raise + if result is None: + result = r + else: + result = qmul_np(result, r) + + # Reverse antipodal representation to have a non-negative "w" + if order in ['xyz', 'yzx', 'zxy']: + result *= -1 + + return result.reshape(original_shape) + + +def quaternion_to_matrix(quaternions): + """ + Convert rotations given as quaternions to rotation matrices. + Args: + quaternions: quaternions with real part first, + as tensor of shape (..., 4). + Returns: + Rotation matrices as tensor of shape (..., 3, 3). 
+ """ + r, i, j, k = torch.unbind(quaternions, -1) + two_s = 2.0 / (quaternions * quaternions).sum(-1) + + o = torch.stack( + ( + 1 - two_s * (j * j + k * k), + two_s * (i * j - k * r), + two_s * (i * k + j * r), + two_s * (i * j + k * r), + 1 - two_s * (i * i + k * k), + two_s * (j * k - i * r), + two_s * (i * k - j * r), + two_s * (j * k + i * r), + 1 - two_s * (i * i + j * j), + ), + -1, + ) + return o.reshape(quaternions.shape[:-1] + (3, 3)) + + +def quaternion_to_matrix_np(quaternions): + q = torch.from_numpy(quaternions).contiguous().float() + return quaternion_to_matrix(q).numpy() + + +def quaternion_to_cont6d_np(quaternions): + rotation_mat = quaternion_to_matrix_np(quaternions) + cont_6d = np.concatenate([rotation_mat[..., 0], rotation_mat[..., 1]], axis=-1) + return cont_6d + + +def quaternion_to_cont6d(quaternions): + rotation_mat = quaternion_to_matrix(quaternions) + cont_6d = torch.cat([rotation_mat[..., 0], rotation_mat[..., 1]], dim=-1) + return cont_6d + + +def cont6d_to_matrix(cont6d): + assert cont6d.shape[-1] == 6, "The last dimension must be 6" + x_raw = cont6d[..., 0:3] + y_raw = cont6d[..., 3:6] + + x = x_raw / torch.norm(x_raw, dim=-1, keepdim=True) + z = torch.cross(x, y_raw, dim=-1) + z = z / torch.norm(z, dim=-1, keepdim=True) + + y = torch.cross(z, x, dim=-1) + + x = x[..., None] + y = y[..., None] + z = z[..., None] + + mat = torch.cat([x, y, z], dim=-1) + return mat + + +def cont6d_to_matrix_np(cont6d): + q = torch.from_numpy(cont6d).contiguous().float() + return cont6d_to_matrix(q).numpy() + + +def qpow(q0, t, dtype=torch.float): + ''' q0 : tensor of quaternions + t: tensor of powers + ''' + q0 = qnormalize(q0) + theta0 = torch.acos(q0[..., 0]) + + ## if theta0 is close to zero, add epsilon to avoid NaNs + mask = (theta0 <= 10e-10) * (theta0 >= -10e-10) + theta0 = (1 - mask) * theta0 + mask * 10e-10 + v0 = q0[..., 1:] / torch.sin(theta0).view(-1, 1) + + if isinstance(t, torch.Tensor): + q = torch.zeros(t.shape + q0.shape) + theta = t.view(-1, 1) * theta0.view(1, -1) + else: ## if t is a number + q = torch.zeros(q0.shape) + theta = t * theta0 + + q[..., 0] = torch.cos(theta) + q[..., 1:] = v0 * torch.sin(theta).unsqueeze(-1) + + return q.to(dtype) + + +def qslerp(q0, q1, t): + ''' + q0: starting quaternion + q1: ending quaternion + t: array of points along the way + + Returns: + Tensor of Slerps: t.shape + q0.shape + ''' + + q0 = qnormalize(q0) + q1 = qnormalize(q1) + q_ = qpow(qmul(q1, qinv(q0)), t) + + return qmul(q_, + q0.contiguous().view(torch.Size([1] * len(t.shape)) + q0.shape).expand(t.shape + q0.shape).contiguous()) + + +def qbetween(v0, v1): + ''' + find the quaternion used to rotate v0 to v1 + ''' + assert v0.shape[-1] == 3, 'v0 must be of the shape (*, 3)' + assert v1.shape[-1] == 3, 'v1 must be of the shape (*, 3)' + + v = torch.cross(v0, v1) + w = torch.sqrt((v0 ** 2).sum(dim=-1, keepdim=True) * (v1 ** 2).sum(dim=-1, keepdim=True)) + (v0 * v1).sum(dim=-1, + keepdim=True) + return qnormalize(torch.cat([w, v], dim=-1)) + + +def qbetween_np(v0, v1): + ''' + find the quaternion used to rotate v0 to v1 + ''' + assert v0.shape[-1] == 3, 'v0 must be of the shape (*, 3)' + assert v1.shape[-1] == 3, 'v1 must be of the shape (*, 3)' + + v0 = torch.from_numpy(v0).float() + v1 = torch.from_numpy(v1).float() + return qbetween(v0, v1).numpy() + + +def lerp(p0, p1, t): + if not isinstance(t, torch.Tensor): + t = torch.Tensor([t]) + + new_shape = t.shape + p0.shape + new_view_t = t.shape + torch.Size([1] * len(p0.shape)) + new_view_p = torch.Size([1] * 
len(t.shape)) + p0.shape + p0 = p0.view(new_view_p).expand(new_shape) + p1 = p1.view(new_view_p).expand(new_shape) + t = t.view(new_view_t).expand(new_shape) + + return p0 + t * (p1 - p0) diff --git a/Evaluator_272/mld/data/humanml/common/skeleton.py b/Evaluator_272/mld/data/humanml/common/skeleton.py new file mode 100644 index 0000000000000000000000000000000000000000..b2ae85ad14df8c1a8d77e689b1cffbc6c814a979 --- /dev/null +++ b/Evaluator_272/mld/data/humanml/common/skeleton.py @@ -0,0 +1,199 @@ +from .quaternion import * +import scipy.ndimage.filters as filters + +class Skeleton(object): + def __init__(self, offset, kinematic_tree, device): + self.device = device + self._raw_offset_np = offset.numpy() + self._raw_offset = offset.clone().detach().to(device).float() + self._kinematic_tree = kinematic_tree + self._offset = None + self._parents = [0] * len(self._raw_offset) + self._parents[0] = -1 + for chain in self._kinematic_tree: + for j in range(1, len(chain)): + self._parents[chain[j]] = chain[j-1] + + def njoints(self): + return len(self._raw_offset) + + def offset(self): + return self._offset + + def set_offset(self, offsets): + self._offset = offsets.clone().detach().to(self.device).float() + + def kinematic_tree(self): + return self._kinematic_tree + + def parents(self): + return self._parents + + # joints (batch_size, joints_num, 3) + def get_offsets_joints_batch(self, joints): + assert len(joints.shape) == 3 + _offsets = self._raw_offset.expand(joints.shape[0], -1, -1).clone() + for i in range(1, self._raw_offset.shape[0]): + _offsets[:, i] = torch.norm(joints[:, i] - joints[:, self._parents[i]], p=2, dim=1)[:, None] * _offsets[:, i] + + self._offset = _offsets.detach() + return _offsets + + # joints (joints_num, 3) + def get_offsets_joints(self, joints): + assert len(joints.shape) == 2 + _offsets = self._raw_offset.clone() + for i in range(1, self._raw_offset.shape[0]): + # print(joints.shape) + _offsets[i] = torch.norm(joints[i] - joints[self._parents[i]], p=2, dim=0) * _offsets[i] + + self._offset = _offsets.detach() + return _offsets + + # face_joint_idx should follow the order of right hip, left hip, right shoulder, left shoulder + # joints (batch_size, joints_num, 3) + def inverse_kinematics_np(self, joints, face_joint_idx, smooth_forward=False): + assert len(face_joint_idx) == 4 + '''Get Forward Direction''' + l_hip, r_hip, sdr_r, sdr_l = face_joint_idx + across1 = joints[:, r_hip] - joints[:, l_hip] + across2 = joints[:, sdr_r] - joints[:, sdr_l] + across = across1 + across2 + across = across / np.sqrt((across**2).sum(axis=-1))[:, np.newaxis] + # print(across1.shape, across2.shape) + + # forward (batch_size, 3) + forward = np.cross(np.array([[0, 1, 0]]), across, axis=-1) + if smooth_forward: + forward = filters.gaussian_filter1d(forward, 20, axis=0, mode='nearest') + # forward (batch_size, 3) + forward = forward / np.sqrt((forward**2).sum(axis=-1))[..., np.newaxis] + + '''Get Root Rotation''' + target = np.array([[0,0,1]]).repeat(len(forward), axis=0) + root_quat = qbetween_np(forward, target) + + '''Inverse Kinematics''' + # quat_params (batch_size, joints_num, 4) + # print(joints.shape[:-1]) + quat_params = np.zeros(joints.shape[:-1] + (4,)) + # print(quat_params.shape) + root_quat[0] = np.array([[1.0, 0.0, 0.0, 0.0]]) + quat_params[:, 0] = root_quat + # quat_params[0, 0] = np.array([[1.0, 0.0, 0.0, 0.0]]) + for chain in self._kinematic_tree: + R = root_quat + for j in range(len(chain) - 1): + # (batch, 3) + u = 
self._raw_offset_np[chain[j+1]][np.newaxis,...].repeat(len(joints), axis=0) + # print(u.shape) + # (batch, 3) + v = joints[:, chain[j+1]] - joints[:, chain[j]] + v = v / np.sqrt((v**2).sum(axis=-1))[:, np.newaxis] + # print(u.shape, v.shape) + rot_u_v = qbetween_np(u, v) + + R_loc = qmul_np(qinv_np(R), rot_u_v) + + quat_params[:,chain[j + 1], :] = R_loc + R = qmul_np(R, R_loc) + + return quat_params + + # Be sure root joint is at the beginning of kinematic chains + def forward_kinematics(self, quat_params, root_pos, skel_joints=None, do_root_R=True): + # quat_params (batch_size, joints_num, 4) + # joints (batch_size, joints_num, 3) + # root_pos (batch_size, 3) + if skel_joints is not None: + offsets = self.get_offsets_joints_batch(skel_joints) + if len(self._offset.shape) == 2: + offsets = self._offset.expand(quat_params.shape[0], -1, -1) + joints = torch.zeros(quat_params.shape[:-1] + (3,)).to(self.device) + joints[:, 0] = root_pos + for chain in self._kinematic_tree: + if do_root_R: + R = quat_params[:, 0] + else: + R = torch.tensor([[1.0, 0.0, 0.0, 0.0]]).expand(len(quat_params), -1).detach().to(self.device) + for i in range(1, len(chain)): + R = qmul(R, quat_params[:, chain[i]]) + offset_vec = offsets[:, chain[i]] + joints[:, chain[i]] = qrot(R, offset_vec) + joints[:, chain[i-1]] + return joints + + # Be sure root joint is at the beginning of kinematic chains + def forward_kinematics_np(self, quat_params, root_pos, skel_joints=None, do_root_R=True): + # quat_params (batch_size, joints_num, 4) + # joints (batch_size, joints_num, 3) + # root_pos (batch_size, 3) + if skel_joints is not None: + skel_joints = torch.from_numpy(skel_joints) + offsets = self.get_offsets_joints_batch(skel_joints) + if len(self._offset.shape) == 2: + offsets = self._offset.expand(quat_params.shape[0], -1, -1) + offsets = offsets.numpy() + joints = np.zeros(quat_params.shape[:-1] + (3,)) + joints[:, 0] = root_pos + for chain in self._kinematic_tree: + if do_root_R: + R = quat_params[:, 0] + else: + R = np.array([[1.0, 0.0, 0.0, 0.0]]).repeat(len(quat_params), axis=0) + for i in range(1, len(chain)): + R = qmul_np(R, quat_params[:, chain[i]]) + offset_vec = offsets[:, chain[i]] + joints[:, chain[i]] = qrot_np(R, offset_vec) + joints[:, chain[i - 1]] + return joints + + def forward_kinematics_cont6d_np(self, cont6d_params, root_pos, skel_joints=None, do_root_R=True): + # cont6d_params (batch_size, joints_num, 6) + # joints (batch_size, joints_num, 3) + # root_pos (batch_size, 3) + if skel_joints is not None: + skel_joints = torch.from_numpy(skel_joints) + offsets = self.get_offsets_joints_batch(skel_joints) + if len(self._offset.shape) == 2: + offsets = self._offset.expand(cont6d_params.shape[0], -1, -1) + offsets = offsets.numpy() + joints = np.zeros(cont6d_params.shape[:-1] + (3,)) + joints[:, 0] = root_pos + for chain in self._kinematic_tree: + if do_root_R: + matR = cont6d_to_matrix_np(cont6d_params[:, 0]) + else: + matR = np.eye(3)[np.newaxis, :].repeat(len(cont6d_params), axis=0) + for i in range(1, len(chain)): + matR = np.matmul(matR, cont6d_to_matrix_np(cont6d_params[:, chain[i]])) + offset_vec = offsets[:, chain[i]][..., np.newaxis] + # print(matR.shape, offset_vec.shape) + joints[:, chain[i]] = np.matmul(matR, offset_vec).squeeze(-1) + joints[:, chain[i-1]] + return joints + + def forward_kinematics_cont6d(self, cont6d_params, root_pos, skel_joints=None, do_root_R=True): + # cont6d_params (batch_size, joints_num, 6) + # joints (batch_size, joints_num, 3) + # root_pos (batch_size, 3) + if skel_joints is 
not None: + # skel_joints = torch.from_numpy(skel_joints) + offsets = self.get_offsets_joints_batch(skel_joints) + if len(self._offset.shape) == 2: + offsets = self._offset.expand(cont6d_params.shape[0], -1, -1) + joints = torch.zeros(cont6d_params.shape[:-1] + (3,)).to(cont6d_params.device) + joints[..., 0, :] = root_pos + for chain in self._kinematic_tree: + if do_root_R: + matR = cont6d_to_matrix(cont6d_params[:, 0]) + else: + matR = torch.eye(3).expand((len(cont6d_params), -1, -1)).detach().to(cont6d_params.device) + for i in range(1, len(chain)): + matR = torch.matmul(matR, cont6d_to_matrix(cont6d_params[:, chain[i]])) + offset_vec = offsets[:, chain[i]].unsqueeze(-1) + # print(matR.shape, offset_vec.shape) + joints[:, chain[i]] = torch.matmul(matR, offset_vec).squeeze(-1) + joints[:, chain[i-1]] + return joints + + + + + diff --git a/Evaluator_272/mld/data/humanml/data/__init__.py b/Evaluator_272/mld/data/humanml/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Evaluator_272/mld/data/humanml/data/dataset.py b/Evaluator_272/mld/data/humanml/data/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..eb899d49f22f40d85243010323361b1620d86dce --- /dev/null +++ b/Evaluator_272/mld/data/humanml/data/dataset.py @@ -0,0 +1,227 @@ +import codecs as cs +import os +import random +from os.path import join as pjoin + +import numpy as np +import spacy +import torch +from rich.progress import track +from torch.utils import data +from torch.utils.data._utils.collate import default_collate +from tqdm import tqdm +import json + + +def collate_fn(batch): + batch.sort(key=lambda x: x[3], reverse=True) + return default_collate(batch) + + + +def findAllFile(base): + file_path = [] + for root, ds, fs in os.walk(base, followlinks=True): + for f in fs: + fullname = os.path.join(root, f) + file_path.append(fullname) + return file_path + + +class Text2MotionDatasetV2(data.Dataset): + + def __init__( + self, + mean, + std, + split_file, + max_motion_length, + min_motion_length, + max_text_len, + unit_length, + motion_dir, + text_dir, + input_format, + njoints, + tiny=False, + debug=False, + progress_bar=True, + **kwargs, + ): + + self.max_length = 20 + self.pointer = 0 + self.max_motion_length = max_motion_length + + self.min_motion_length = min_motion_length + self.max_text_len = max_text_len + self.unit_length = unit_length + data_dict = {} + id_list = [] + with cs.open(split_file, "r") as f: + for line in f.readlines(): + id_list.append(line.strip()) + self.id_list = id_list + if tiny or debug: + progress_bar = False + maxdata = 10 if tiny else 100 + else: + maxdata = 1e10 + + if progress_bar: + enumerator = enumerate( + track( + id_list, + f"Loading {split_file.split('/')[-2]} {split_file.split('/')[-1].split('.')[0]}", + )) + else: + enumerator = enumerate(id_list) + count = 0 + bad_count = 0 + miss_count = 0 + new_name_list = [] + length_list = [] + + for i, name in enumerator: + if count > maxdata: + break + try: + + motion = np.load(pjoin(motion_dir, name + ".npy")) + + if input_format == 'root_position': + motion = motion[..., :4+(njoints-1)*3] + elif input_format == 'root_position_vel': + motion = np.concatenate((motion[..., :4+(njoints - 1) * 3], motion[..., 4+(njoints - 1) * 9: 4+(njoints - 1) * 9 + njoints*3]), axis=-1) + elif input_format == 'root_position_rot6d': + motion = np.concatenate((motion[..., :4+(njoints - 1) * 3], motion[..., 4+(njoints - 1) * 3: 4+(njoints - 1) * 9]), 
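A minimal sketch of driving the Skeleton class above (the import paths follow the package layout implied by motion_process.py below; `rest_pose`, a (22, 3) template pose, is illustrative, not a name defined in this diff). Bone offsets are cached from the template, then forward_kinematics_cont6d_np accumulates one rotation per joint along each kinematic chain:

import numpy as np
import torch
from mld.data.humanml.common.skeleton import Skeleton
from mld.data.humanml.utils.paramUtil import t2m_raw_offsets, t2m_kinematic_chain

skel = Skeleton(torch.from_numpy(t2m_raw_offsets), t2m_kinematic_chain, 'cpu')
tgt_offsets = skel.get_offsets_joints(torch.from_numpy(rest_pose))  # (22, 3) bone offsets from the template
skel.set_offset(tgt_offsets)                                        # use them for forward kinematics

T = 8
identity_6d = np.tile(np.array([1., 0., 0., 0., 1., 0.]), (T, 22, 1))  # 6D encoding of R = I
root_pos = np.zeros((T, 3))
joints = skel.forward_kinematics_cont6d_np(identity_6d, root_pos)      # -> (T, 22, 3) joint positions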
axis=-1) + elif input_format == 'root_rot6d': + motion = np.concatenate((motion[..., :4], motion[..., 4+(njoints - 1) * 3: 4+(njoints - 1) * 9]), axis=-1) + elif input_format in ['vector_263', '']: + pass + else: + raise NotImplementedError + + + text_data = [] + flag = False + with cs.open(pjoin(text_dir, name + ".txt")) as f: + for line in f.readlines(): + text_dict = {} + line_split = line.strip().split("#") + caption = line_split[0] + tokens = line_split[1].split(" ") + f_tag = float(line_split[2]) + to_tag = float(line_split[3]) + f_tag = 0.0 if np.isnan(f_tag) else f_tag + to_tag = 0.0 if np.isnan(to_tag) else to_tag + + text_dict["caption"] = caption + text_dict["tokens"] = tokens + if f_tag == 0.0 and to_tag == 0.0: + flag = True + text_data.append(text_dict) + else: + try: + n_motion = motion[int(f_tag * 30):int(to_tag * 30)] + + new_name = ( + random.choice("ABCDEFGHIJKLMNOPQRSTUVW") + + "_" + name) + while new_name in data_dict: + new_name = (random.choice( + "ABCDEFGHIJKLMNOPQRSTUVW") + "_" + + name) + data_dict[new_name] = { + "motion": n_motion, + "length": len(n_motion), + "text": [text_dict], + } + new_name_list.append(new_name) + length_list.append(len(n_motion)) + except Exception: + print(line_split) + print(line_split[2], line_split[3], f_tag, + to_tag, name) + + + if flag: + data_dict[name] = { + "motion": motion, + "length": len(motion), + "text": text_data, + } + new_name_list.append(name) + length_list.append(len(motion)) + count += 1 + + except Exception: + miss_count += 1 + pass + + print(f'{miss_count} motions are missing from the dataset!') + + name_list, length_list = zip( + *sorted(zip(new_name_list, length_list), key=lambda x: x[1])) + + + + self.mean = mean + self.std = std + + self.length_arr = np.array(length_list) + self.data_dict = data_dict + self.nfeats = motion.shape[1] + self.name_list = name_list + self.reset_max_len(self.max_length) + + + def reset_max_len(self, length): + assert length <= self.max_motion_length + self.pointer = np.searchsorted(self.length_arr, length) + print("Pointer Pointing at %d" % self.pointer) + self.max_length = length + + def inv_transform(self, data): + return data * self.std + self.mean + + def __len__(self): + return len(self.name_list) - self.pointer + + def __getitem__(self, item): + idx = self.pointer + item + data = self.data_dict[self.name_list[idx]] + + retrieval_name = self.name_list[idx].split('_')[-1] + + motion, m_length, text_list = data["motion"], data["length"], data["text"] + + # Randomly select a caption + text_data = random.choice(text_list) + # caption, tokens = text_data["caption"], text_data["tokens"] + caption = text_data["caption"] + + # Crop the motion length into multiples of unit_length, and introduce small variations + if self.unit_length < 10: + coin2 = np.random.choice(["single", "single", "double"]) + else: + coin2 = "single" + + if coin2 == "double": + m_length = (m_length // self.unit_length - 1) * self.unit_length + elif coin2 == "single": + m_length = (m_length // self.unit_length) * self.unit_length + idx = random.randint(0, len(motion) - m_length) + motion = motion[idx:idx + m_length] + # Normalization + motion = (motion - self.mean) / self.std + + if np.any(np.isnan(motion)): + raise ValueError("nan in motion") + + return ( + caption, + motion, + m_length, + retrieval_name + ) diff --git a/Evaluator_272/mld/data/humanml/scripts/motion_process.py b/Evaluator_272/mld/data/humanml/scripts/motion_process.py new file mode 100644 index 0000000000000000000000000000000000000000..12bbbfa13ede245946339a417b1f8a1f36f7ac9f --- /dev/null +++ 
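For reference, each line the loader above parses has the HumanML3D-style layout caption#tokens#f_tag#to_tag, where tokens are word/POS pairs (the same convention the word vectorizer below splits on '/') and a zero f_tag/to_tag marks a caption covering the whole clip. An illustrative line:

a person walks forward and sits down#a/DET person/NOUN walk/VERB forward/ADV and/CCONJ sit/VERB down/ADP#0.0#0.0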
b/Evaluator_272/mld/data/humanml/scripts/motion_process.py @@ -0,0 +1,576 @@ +from os.path import join as pjoin + +from ..common.skeleton import Skeleton +import numpy as np +import os +from ..common.quaternion import * +from ..utils.paramUtil import * + +import torch +from tqdm import tqdm + +# positions (batch, joint_num, 3) +def uniform_skeleton(positions, target_offset): + src_skel = Skeleton(n_raw_offsets, kinematic_chain, 'cpu') + src_offset = src_skel.get_offsets_joints(torch.from_numpy(positions[0])) + src_offset = src_offset.numpy() + tgt_offset = target_offset.numpy() + # print(src_offset) + # print(tgt_offset) + '''Calculate Scale Ratio as the ratio of legs''' + src_leg_len = np.abs(src_offset[l_idx1]).max() + np.abs(src_offset[l_idx2]).max() + tgt_leg_len = np.abs(tgt_offset[l_idx1]).max() + np.abs(tgt_offset[l_idx2]).max() + + scale_rt = tgt_leg_len / src_leg_len + # print(scale_rt) + src_root_pos = positions[:, 0] + tgt_root_pos = src_root_pos * scale_rt + + '''Inverse Kinematics''' + quat_params = src_skel.inverse_kinematics_np(positions, face_joint_indx) + # print(quat_params.shape) + + '''Forward Kinematics''' + src_skel.set_offset(target_offset) + new_joints = src_skel.forward_kinematics_np(quat_params, tgt_root_pos) + return new_joints + + +def extract_features(positions, feet_thre, n_raw_offsets, kinematic_chain, face_joint_indx, fid_r, fid_l): + global_positions = positions.copy() + """ Get Foot Contacts """ + + def foot_detect(positions, thres): + velfactor, heightfactor = np.array([thres, thres]), np.array([3.0, 2.0]) + + feet_l_x = (positions[1:, fid_l, 0] - positions[:-1, fid_l, 0]) ** 2 + feet_l_y = (positions[1:, fid_l, 1] - positions[:-1, fid_l, 1]) ** 2 + feet_l_z = (positions[1:, fid_l, 2] - positions[:-1, fid_l, 2]) ** 2 + # feet_l_h = positions[:-1,fid_l,1] + # feet_l = (((feet_l_x + feet_l_y + feet_l_z) < velfactor) & (feet_l_h < heightfactor)).astype(np.float64) + feet_l = ((feet_l_x + feet_l_y + feet_l_z) < velfactor).astype(np.float64) + + feet_r_x = (positions[1:, fid_r, 0] - positions[:-1, fid_r, 0]) ** 2 + feet_r_y = (positions[1:, fid_r, 1] - positions[:-1, fid_r, 1]) ** 2 + feet_r_z = (positions[1:, fid_r, 2] - positions[:-1, fid_r, 2]) ** 2 + # feet_r_h = positions[:-1,fid_r,1] + # feet_r = (((feet_r_x + feet_r_y + feet_r_z) < velfactor) & (feet_r_h < heightfactor)).astype(np.float64) + feet_r = (((feet_r_x + feet_r_y + feet_r_z) < velfactor)).astype(np.float64) + return feet_l, feet_r + + # + feet_l, feet_r = foot_detect(positions, feet_thre) + # feet_l, feet_r = foot_detect(positions, 0.002) + + '''Quaternion and Cartesian representation''' + r_rot = None + + def get_rifke(positions): + '''Local pose''' + positions[..., 0] -= positions[:, 0:1, 0] + positions[..., 2] -= positions[:, 0:1, 2] + '''All pose face Z+''' + positions = qrot_np(np.repeat(r_rot[:, None], positions.shape[1], axis=1), positions) + return positions + + def get_quaternion(positions): + skel = Skeleton(n_raw_offsets, kinematic_chain, "cpu") + # (seq_len, joints_num, 4) + quat_params = skel.inverse_kinematics_np(positions, face_joint_indx, smooth_forward=False) + + '''Fix Quaternion Discontinuity''' + quat_params = qfix(quat_params) + # (seq_len, 4) + r_rot = quat_params[:, 0].copy() + # print(r_rot[0]) + '''Root Linear Velocity''' + # (seq_len - 1, 3) + velocity = (positions[1:, 0] - positions[:-1, 0]).copy() + # print(r_rot.shape, velocity.shape) + velocity = qrot_np(r_rot[1:], velocity) + '''Root Angular Velocity''' + # (seq_len - 1, 4) + r_velocity = qmul_np(r_rot[1:], 
qinv_np(r_rot[:-1])) + quat_params[1:, 0] = r_velocity + # (seq_len, joints_num, 4) + return quat_params, r_velocity, velocity, r_rot + + def get_cont6d_params(positions): + skel = Skeleton(n_raw_offsets, kinematic_chain, "cpu") + # (seq_len, joints_num, 4) + quat_params = skel.inverse_kinematics_np(positions, face_joint_indx, smooth_forward=True) + + '''Quaternion to continuous 6D''' + cont_6d_params = quaternion_to_cont6d_np(quat_params) + # (seq_len, 4) + r_rot = quat_params[:, 0].copy() + # print(r_rot[0]) + '''Root Linear Velocity''' + # (seq_len - 1, 3) + velocity = (positions[1:, 0] - positions[:-1, 0]).copy() + # print(r_rot.shape, velocity.shape) + velocity = qrot_np(r_rot[1:], velocity) + '''Root Angular Velocity''' + # (seq_len - 1, 4) + r_velocity = qmul_np(r_rot[1:], qinv_np(r_rot[:-1])) + # (seq_len, joints_num, 4) + return cont_6d_params, r_velocity, velocity, r_rot + + cont_6d_params, r_velocity, velocity, r_rot = get_cont6d_params(positions) + positions = get_rifke(positions) + + # trejec = np.cumsum(np.concatenate([np.array([[0, 0, 0]]), velocity], axis=0), axis=0) + # r_rotations, r_pos = recover_ric_glo_np(r_velocity, velocity[:, [0, 2]]) + + # plt.plot(positions_b[:, 0, 0], positions_b[:, 0, 2], marker='*') + # plt.plot(ground_positions[:, 0, 0], ground_positions[:, 0, 2], marker='o', color='r') + # plt.plot(trejec[:, 0], trejec[:, 2], marker='^', color='g') + # plt.plot(r_pos[:, 0], r_pos[:, 2], marker='s', color='y') + # plt.xlabel('x') + # plt.ylabel('z') + # plt.axis('equal') + # plt.show() + + '''Root height''' + root_y = positions[:, 0, 1:2] + + '''Root rotation and linear velocity''' + # (seq_len-1, 1) rotation velocity along y-axis + # (seq_len-1, 2) linear velocity on xz plane + r_velocity = np.arcsin(r_velocity[:, 2:3]) + l_velocity = velocity[:, [0, 2]] + # print(r_velocity.shape, l_velocity.shape, root_y.shape) + root_data = np.concatenate([r_velocity, l_velocity, root_y[:-1]], axis=-1) + + '''Get Joint Rotation Representation''' + # (seq_len, (joints_num-1) *6) continuous 6D rotation for skeleton joints + rot_data = cont_6d_params[:, 1:].reshape(len(cont_6d_params), -1) + + '''Get Joint Rotation Invariant Position Representation''' + # (seq_len, (joints_num-1)*3) local joint position + ric_data = positions[:, 1:].reshape(len(positions), -1) + + '''Get Joint Velocity Representation''' + # (seq_len-1, joints_num*3) + local_vel = qrot_np(np.repeat(r_rot[:-1, None], global_positions.shape[1], axis=1), + global_positions[1:] - global_positions[:-1]) + local_vel = local_vel.reshape(len(local_vel), -1) + + data = root_data + data = np.concatenate([data, ric_data[:-1]], axis=-1) + data = np.concatenate([data, rot_data[:-1]], axis=-1) + # print(data.shape, local_vel.shape) + data = np.concatenate([data, local_vel], axis=-1) + data = np.concatenate([data, feet_l, feet_r], axis=-1) + + return data + + +def process_file(positions, feet_thre): + # (seq_len, joints_num, 3) + # '''Down Sample''' + # positions = positions[::ds_num] + + '''Uniform Skeleton''' + positions = uniform_skeleton(positions, tgt_offsets) + + '''Put on Floor''' + floor_height = positions.min(axis=0).min(axis=0)[1] + positions[:, :, 1] -= floor_height + # print(floor_height) + + # plot_3d_motion("./positions_1.mp4", kinematic_chain, positions, 'title', fps=20) + + '''XZ at origin''' + root_pos_init = positions[0] + root_pose_init_xz = root_pos_init[0] * np.array([1, 0, 1]) + positions = positions - root_pose_init_xz + + # '''Move the first pose to origin ''' + # root_pos_init = positions[0] + # positions = 
positions - root_pos_init[0] + + '''All initially face Z+''' + r_hip, l_hip, sdr_r, sdr_l = face_joint_indx + across1 = root_pos_init[r_hip] - root_pos_init[l_hip] + across2 = root_pos_init[sdr_r] - root_pos_init[sdr_l] + across = across1 + across2 + across = across / np.sqrt((across ** 2).sum(axis=-1))[..., np.newaxis] + + # forward (3,), rotate around y-axis + forward_init = np.cross(np.array([[0, 1, 0]]), across, axis=-1) + # forward (3,) + forward_init = forward_init / np.sqrt((forward_init ** 2).sum(axis=-1))[..., np.newaxis] + + # print(forward_init) + + target = np.array([[0, 0, 1]]) + root_quat_init = qbetween_np(forward_init, target) + root_quat_init = np.ones(positions.shape[:-1] + (4,)) * root_quat_init + + positions_b = positions.copy() + + positions = qrot_np(root_quat_init, positions) + + # plot_3d_motion("./positions_2.mp4", kinematic_chain, positions, 'title', fps=20) + + '''New ground truth positions''' + global_positions = positions.copy() + + # plt.plot(positions_b[:, 0, 0], positions_b[:, 0, 2], marker='*') + # plt.plot(positions[:, 0, 0], positions[:, 0, 2], marker='o', color='r') + # plt.xlabel('x') + # plt.ylabel('z') + # plt.axis('equal') + # plt.show() + + """ Get Foot Contacts """ + + def foot_detect(positions, thres): + velfactor, heightfactor = np.array([thres, thres]), np.array([3.0, 2.0]) + + feet_l_x = (positions[1:, fid_l, 0] - positions[:-1, fid_l, 0]) ** 2 + feet_l_y = (positions[1:, fid_l, 1] - positions[:-1, fid_l, 1]) ** 2 + feet_l_z = (positions[1:, fid_l, 2] - positions[:-1, fid_l, 2]) ** 2 + # feet_l_h = positions[:-1,fid_l,1] + # feet_l = (((feet_l_x + feet_l_y + feet_l_z) < velfactor) & (feet_l_h < heightfactor)).astype(np.float64) + feet_l = ((feet_l_x + feet_l_y + feet_l_z) < velfactor).astype(np.float64) + + feet_r_x = (positions[1:, fid_r, 0] - positions[:-1, fid_r, 0]) ** 2 + feet_r_y = (positions[1:, fid_r, 1] - positions[:-1, fid_r, 1]) ** 2 + feet_r_z = (positions[1:, fid_r, 2] - positions[:-1, fid_r, 2]) ** 2 + # feet_r_h = positions[:-1,fid_r,1] + # feet_r = (((feet_r_x + feet_r_y + feet_r_z) < velfactor) & (feet_r_h < heightfactor)).astype(np.float64) + feet_r = (((feet_r_x + feet_r_y + feet_r_z) < velfactor)).astype(np.float64) + return feet_l, feet_r + # + feet_l, feet_r = foot_detect(positions, feet_thre) + # feet_l, feet_r = foot_detect(positions, 0.002) + + '''Quaternion and Cartesian representation''' + r_rot = None + + def get_rifke(positions): + '''Local pose''' + positions[..., 0] -= positions[:, 0:1, 0] + positions[..., 2] -= positions[:, 0:1, 2] + '''All pose face Z+''' + positions = qrot_np(np.repeat(r_rot[:, None], positions.shape[1], axis=1), positions) + return positions + + def get_quaternion(positions): + skel = Skeleton(n_raw_offsets, kinematic_chain, "cpu") + # (seq_len, joints_num, 4) + quat_params = skel.inverse_kinematics_np(positions, face_joint_indx, smooth_forward=False) + + '''Fix Quaternion Discontinuity''' + quat_params = qfix(quat_params) + # (seq_len, 4) + r_rot = quat_params[:, 0].copy() + # print(r_rot[0]) + '''Root Linear Velocity''' + # (seq_len - 1, 3) + velocity = (positions[1:, 0] - positions[:-1, 0]).copy() + # print(r_rot.shape, velocity.shape) + velocity = qrot_np(r_rot[1:], velocity) + '''Root Angular Velocity''' + # (seq_len - 1, 4) + r_velocity = qmul_np(r_rot[1:], qinv_np(r_rot[:-1])) + quat_params[1:, 0] = r_velocity + # (seq_len, joints_num, 4) + return quat_params, r_velocity, velocity, r_rot + + def get_cont6d_params(positions): + skel = Skeleton(n_raw_offsets, kinematic_chain, "cpu") + # 
(seq_len, joints_num, 4) + quat_params = skel.inverse_kinematics_np(positions, face_joint_indx, smooth_forward=True) + + '''Quaternion to continuous 6D''' + cont_6d_params = quaternion_to_cont6d_np(quat_params) + # (seq_len, 4) + r_rot = quat_params[:, 0].copy() + # print(r_rot[0]) + '''Root Linear Velocity''' + # (seq_len - 1, 3) + velocity = (positions[1:, 0] - positions[:-1, 0]).copy() + # print(r_rot.shape, velocity.shape) + velocity = qrot_np(r_rot[1:], velocity) + '''Root Angular Velocity''' + # (seq_len - 1, 4) + r_velocity = qmul_np(r_rot[1:], qinv_np(r_rot[:-1])) + # (seq_len, joints_num, 4) + return cont_6d_params, r_velocity, velocity, r_rot + + cont_6d_params, r_velocity, velocity, r_rot = get_cont6d_params(positions) + positions = get_rifke(positions) + + # trejec = np.cumsum(np.concatenate([np.array([[0, 0, 0]]), velocity], axis=0), axis=0) + # r_rotations, r_pos = recover_ric_glo_np(r_velocity, velocity[:, [0, 2]]) + + # plt.plot(positions_b[:, 0, 0], positions_b[:, 0, 2], marker='*') + # plt.plot(ground_positions[:, 0, 0], ground_positions[:, 0, 2], marker='o', color='r') + # plt.plot(trejec[:, 0], trejec[:, 2], marker='^', color='g') + # plt.plot(r_pos[:, 0], r_pos[:, 2], marker='s', color='y') + # plt.xlabel('x') + # plt.ylabel('z') + # plt.axis('equal') + # plt.show() + + '''Root height''' + root_y = positions[:, 0, 1:2] + + '''Root rotation and linear velocity''' + # (seq_len-1, 1) rotation velocity along y-axis + # (seq_len-1, 2) linear velocity on xz plane + r_velocity = np.arcsin(r_velocity[:, 2:3]) + l_velocity = velocity[:, [0, 2]] + # print(r_velocity.shape, l_velocity.shape, root_y.shape) + root_data = np.concatenate([r_velocity, l_velocity, root_y[:-1]], axis=-1) + + '''Get Joint Rotation Representation''' + # (seq_len, (joints_num-1) *6) continuous 6D rotation for skeleton joints + rot_data = cont_6d_params[:, 1:].reshape(len(cont_6d_params), -1) + + '''Get Joint Rotation Invariant Position Representation''' + # (seq_len, (joints_num-1)*3) local joint position + ric_data = positions[:, 1:].reshape(len(positions), -1) + + '''Get Joint Velocity Representation''' + # (seq_len-1, joints_num*3) + local_vel = qrot_np(np.repeat(r_rot[:-1, None], global_positions.shape[1], axis=1), + global_positions[1:] - global_positions[:-1]) + local_vel = local_vel.reshape(len(local_vel), -1) + + data = root_data + data = np.concatenate([data, ric_data[:-1]], axis=-1) + data = np.concatenate([data, rot_data[:-1]], axis=-1) + # print(data.shape, local_vel.shape) + data = np.concatenate([data, local_vel], axis=-1) + data = np.concatenate([data, feet_l, feet_r], axis=-1) + + return data, global_positions, positions, l_velocity + + +# Recover global angle and positions for rotation dataset +# root_rot_velocity (B, seq_len, 1) +# root_linear_velocity (B, seq_len, 2) +# root_y (B, seq_len, 1) +# ric_data (B, seq_len, (joint_num - 1)*3) +# rot_data (B, seq_len, (joint_num - 1)*6) +# local_velocity (B, seq_len, joint_num*3) +# foot contact (B, seq_len, 4) +def recover_root_rot_pos(data): + rot_vel = data[..., 0] + r_rot_ang = torch.zeros_like(rot_vel).to(data.device) + '''Get Y-axis rotation from rotation velocity''' + r_rot_ang[..., 1:] = rot_vel[..., :-1] + r_rot_ang = torch.cumsum(r_rot_ang, dim=-1) + + r_rot_quat = torch.zeros(data.shape[:-1] + (4,)).to(data.device) + r_rot_quat[..., 0] = torch.cos(r_rot_ang) + r_rot_quat[..., 2] = torch.sin(r_rot_ang) + + r_pos = torch.zeros(data.shape[:-1] + (3,)).to(data.device) + r_pos[..., 1:, [0, 2]] = data[..., :-1, 1:3] + '''Add Y-axis rotation to root 
position''' + r_pos = qrot(qinv(r_rot_quat), r_pos) + + r_pos = torch.cumsum(r_pos, dim=-2) + + r_pos[..., 1] = data[..., 3] + return r_rot_quat, r_pos + + +def recover_from_rot(data, joints_num, skeleton): + r_rot_quat, r_pos = recover_root_rot_pos(data) + + r_rot_cont6d = quaternion_to_cont6d(r_rot_quat) + + start_indx = 1 + 2 + 1 + (joints_num - 1) * 3 + end_indx = start_indx + (joints_num - 1) * 6 + cont6d_params = data[..., start_indx:end_indx] + # print(r_rot_cont6d.shape, cont6d_params.shape, r_pos.shape) + cont6d_params = torch.cat([r_rot_cont6d, cont6d_params], dim=-1) + cont6d_params = cont6d_params.view(-1, joints_num, 6) + + positions = skeleton.forward_kinematics_cont6d(cont6d_params, r_pos) + + return positions + +def recover_from_root_rot6d(data, joints_num, skeleton): + + r_rot_quat, r_pos = recover_root_rot_pos(data) + + r_rot_cont6d = quaternion_to_cont6d(r_rot_quat) + + start_indx = 1 + 2 + 1 + end_indx = start_indx + (joints_num - 1) * 6 + cont6d_params = data[..., start_indx:end_indx] + # print(r_rot_cont6d.shape, cont6d_params.shape, r_pos.shape) + cont6d_params = torch.cat([r_rot_cont6d, cont6d_params], dim=-1) + cont6d_params = cont6d_params.view(-1, joints_num, 6) + r_pos = r_pos.view(-1,3) + positions = skeleton.forward_kinematics_cont6d(cont6d_params, r_pos) + return positions + +def recover_from_body_pos_vel_hand_rot(data, joints_num, skeleton): + assert len(skeleton) == 2 + body_skel = skeleton[0] + all_skel = skeleton[1] + assert joints_num == 52 + face_joint_indx = [2, 1, 17, 16] + + r_rot_quat, r_pos = recover_root_rot_pos(data) + + r_rot_cont6d = quaternion_to_cont6d(r_rot_quat) + + pos_body_data = data[..., : 4 + 21 * 3] + pos_body_data_global = recover_from_ric(pos_body_data, 22) + # pos_body_data_global shape (bs, frame, 22, 3) + quat_params = body_skel.inverse_kinematics(pos_body_data_global, face_joint_indx) + bs = quat_params.shape[0] + frame = quat_params.shape[1] + cont6d_params = quaternion_to_cont6d(quat_params).view(bs, frame, -1) + + # cont6d_params + rot6d_hand_data = data[..., 4 + 21 * 3: 4 + 21 * 3 + 30 * 6] + + cont6d_params = torch.cat([cont6d_params, rot6d_hand_data], dim=-1) + cont6d_params = cont6d_params.view(-1, joints_num, 6) + r_pos = r_pos.view(-1,3) + positions = all_skel.forward_kinematics_cont6d(cont6d_params, r_pos) + return positions + + +def recover_rot(data): + # dataset [bs, seqlen, 263/251] HumanML/KIT + joints_num = 22 if data.shape[-1] == 263 else 21 + r_rot_quat, r_pos = recover_root_rot_pos(data) + r_pos_pad = torch.cat([r_pos, torch.zeros_like(r_pos)], dim=-1).unsqueeze(-2) + r_rot_cont6d = quaternion_to_cont6d(r_rot_quat) + start_indx = 1 + 2 + 1 + (joints_num - 1) * 3 + end_indx = start_indx + (joints_num - 1) * 6 + cont6d_params = data[..., start_indx:end_indx] + cont6d_params = torch.cat([r_rot_cont6d, cont6d_params], dim=-1) + cont6d_params = cont6d_params.view(-1, joints_num, 6) + cont6d_params = torch.cat([cont6d_params, r_pos_pad], dim=-2) + return cont6d_params + + +def recover_from_ric(data, joints_num): + r_rot_quat, r_pos = recover_root_rot_pos(data) + positions = data[..., 4:(joints_num - 1) * 3 + 4] + positions = positions.view(positions.shape[:-1] + (-1, 3)) + + '''Add Y-axis rotation to local joints''' + positions = qrot(qinv(r_rot_quat[..., None, :]).expand(positions.shape[:-1] + (4,)), positions) + + '''Add root XZ to joints''' + positions[..., 0] += r_pos[..., 0:1] + positions[..., 2] += r_pos[..., 2:3] + + '''Concate root and joints''' + positions = torch.cat([r_pos.unsqueeze(-2), positions], 
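As a sanity check on recover_root_rot_pos above, a hedged sketch (shapes illustrative): a constant yaw velocity in channel 0 and a constant forward velocity in channel 2 integrate, via the two cumsum calls, into a root trajectory that bends into an arc on the XZ plane.

import torch
from mld.data.humanml.scripts.motion_process import recover_root_rot_pos

T = 60
data = torch.zeros(1, T, 263)
data[..., 0] = 0.05    # root_rot_velocity: constant yaw rate (rad/frame)
data[..., 2] = 0.02    # z component of the local root linear velocity
r_quat, r_pos = recover_root_rot_pos(data)   # (1, T, 4) and (1, T, 3); r_pos traces an arc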
dim=-2) + + return positions + + +''' +For Text2Motion Dataset +''' +''' +if __name__ == "__main__": + example_id = "000021" + # Lower legs + l_idx1, l_idx2 = 5, 8 + # Right/Left foot + fid_r, fid_l = [8, 11], [7, 10] + # Face direction, r_hip, l_hip, sdr_r, sdr_l + face_joint_indx = [2, 1, 17, 16] + # l_hip, r_hip + r_hip, l_hip = 2, 1 + joints_num = 22 + # ds_num = 8 + data_dir = '../dataset/pose_data_raw/joints/' + save_dir1 = '../dataset/pose_data_raw/new_joints/' + save_dir2 = '../dataset/pose_data_raw/new_joint_vecs/' + + n_raw_offsets = torch.from_numpy(t2m_raw_offsets) + kinematic_chain = t2m_kinematic_chain + + # Get offsets of target skeleton + example_data = np.load(os.path.join(data_dir, example_id + '.npy')) + example_data = example_data.reshape(len(example_data), -1, 3) + example_data = torch.from_numpy(example_data) + tgt_skel = Skeleton(n_raw_offsets, kinematic_chain, 'cpu') + # (joints_num, 3) + tgt_offsets = tgt_skel.get_offsets_joints(example_data[0]) + # print(tgt_offsets) + + source_list = os.listdir(data_dir) + frame_num = 0 + for source_file in tqdm(source_list): + source_data = np.load(os.path.join(data_dir, source_file))[:, :joints_num] + try: + dataset, ground_positions, positions, l_velocity = process_file(source_data, 0.002) + rec_ric_data = recover_from_ric(torch.from_numpy(dataset).unsqueeze(0).float(), joints_num) + np.save(pjoin(save_dir1, source_file), rec_ric_data.squeeze().numpy()) + np.save(pjoin(save_dir2, source_file), dataset) + frame_num += dataset.shape[0] + except Exception as e: + print(source_file) + print(e) + + print('Total clips: %d, Frames: %d, Duration: %fm' % + (len(source_list), frame_num, frame_num / 20 / 60)) +''' + +if __name__ == "__main__": + example_id = "03950_gt" + # Lower legs + l_idx1, l_idx2 = 17, 18 + # Right/Left foot + fid_r, fid_l = [14, 15], [19, 20] + # Face direction, r_hip, l_hip, sdr_r, sdr_l + face_joint_indx = [11, 16, 5, 8] + # l_hip, r_hip + r_hip, l_hip = 11, 16 + joints_num = 21 + # ds_num = 8 + data_dir = '../dataset/kit_mocap_dataset/joints/' + save_dir1 = '../dataset/kit_mocap_dataset/new_joints/' + save_dir2 = '../dataset/kit_mocap_dataset/new_joint_vecs/' + + n_raw_offsets = torch.from_numpy(kit_raw_offsets) + kinematic_chain = kit_kinematic_chain + + '''Get offsets of target skeleton''' + example_data = np.load(os.path.join(data_dir, example_id + '.npy')) + example_data = example_data.reshape(len(example_data), -1, 3) + example_data = torch.from_numpy(example_data) + tgt_skel = Skeleton(n_raw_offsets, kinematic_chain, 'cpu') + # (joints_num, 3) + tgt_offsets = tgt_skel.get_offsets_joints(example_data[0]) + # print(tgt_offsets) + + source_list = os.listdir(data_dir) + frame_num = 0 + '''Read source dataset''' + for source_file in tqdm(source_list): + source_data = np.load(os.path.join(data_dir, source_file))[:, :joints_num] + try: + name = ''.join(source_file[:-7].split('_')) + '.npy' + data, ground_positions, positions, l_velocity = process_file(source_data, 0.05) + rec_ric_data = recover_from_ric(torch.from_numpy(data).unsqueeze(0).float(), joints_num) + if np.isnan(rec_ric_data.numpy()).any(): + print(source_file) + continue + np.save(pjoin(save_dir1, name), rec_ric_data.squeeze().numpy()) + np.save(pjoin(save_dir2, name), data) + frame_num += data.shape[0] + except Exception as e: + print(source_file) + print(e) + + print('Total clips: %d, Frames: %d, Duration: %fm' % + (len(source_list), frame_num, frame_num / 12.5 / 60)) diff --git a/Evaluator_272/mld/data/humanml/utils/__init__.py 
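Putting the pieces together, a sketch of the round trip the __main__ blocks above perform, assuming the module globals (tgt_offsets, face_joint_indx, fid_r/fid_l, l_idx1/l_idx2, n_raw_offsets, kinematic_chain) are initialised first; `raw_joints`, a (T, 22, 3) array, is illustrative.

import numpy as np
import torch

# 263-dim features from raw joints, then joint positions recovered from the features
data, ground_positions, positions, l_velocity = process_file(raw_joints, 0.002)
rec = recover_from_ric(torch.from_numpy(data).unsqueeze(0).float(), 22)
assert not np.isnan(rec.numpy()).any()   # the same NaN guard used in the KIT loop above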
b/Evaluator_272/mld/data/humanml/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Evaluator_272/mld/data/humanml/utils/metrics.py b/Evaluator_272/mld/data/humanml/utils/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..8357de6035068fce1f67669e2f021175a61cd8f5 --- /dev/null +++ b/Evaluator_272/mld/data/humanml/utils/metrics.py @@ -0,0 +1,142 @@ +import numpy as np +from scipy import linalg + +def euclidean_distance_matrix(matrix1, matrix2): + """ + Params: + -- matrix1: N1 x D + -- matrix2: N2 x D + Returns: + -- dist: N1 x N2 + dist[i, j] == distance(matrix1[i], matrix2[j]) + """ + assert matrix1.shape[1] == matrix2.shape[1] + d1 = -2 * np.dot(matrix1, matrix2.T) + d2 = np.sum(np.square(matrix1), axis=1, keepdims=True) + d3 = np.sum(np.square(matrix2), axis=1) + dists = np.sqrt(d1 + d2 + d3) + return dists + +def calculate_top_k(mat, top_k): + size = mat.shape[0] + gt_mat = np.expand_dims(np.arange(size), 1).repeat(size, 1) + bool_mat = (mat == gt_mat) + correct_vec = False + top_k_list = [] + for i in range(top_k): + correct_vec = (correct_vec | bool_mat[:, i]) + top_k_list.append(correct_vec[:, None]) + top_k_mat = np.concatenate(top_k_list, axis=1) + return top_k_mat + + +def calculate_R_precision(embedding1, embedding2, top_k, sum_all=False): + dist_mat = euclidean_distance_matrix(embedding1, embedding2) + argmax = np.argsort(dist_mat, axis=1) + top_k_mat = calculate_top_k(argmax, top_k) + if sum_all: + return top_k_mat.sum(axis=0) + else: + return top_k_mat + + +def calculate_matching_score(embedding1, embedding2, sum_all=False): + assert len(embedding1.shape) == 2 + assert embedding1.shape[0] == embedding2.shape[0] + assert embedding1.shape[1] == embedding2.shape[1] + + dist = linalg.norm(embedding1 - embedding2, axis=1) + if sum_all: + return dist.sum(axis=0) + else: + return dist + + + +def calculate_activation_statistics(activations): + """ + Params: + -- activation: num_samples x dim_feat + Returns: + -- mu: dim_feat + -- sigma: dim_feat x dim_feat + """ + mu = np.mean(activations, axis=0) + cov = np.cov(activations, rowvar=False) + return mu, cov + + +def calculate_diversity(activation, diversity_times): + assert len(activation.shape) == 2 + assert activation.shape[0] > diversity_times + num_samples = activation.shape[0] + + first_indices = np.random.choice(num_samples, diversity_times, replace=False) + second_indices = np.random.choice(num_samples, diversity_times, replace=False) + dist = linalg.norm(activation[first_indices] - activation[second_indices], axis=1) + return dist.mean() + + +def calculate_multimodality(activation, multimodality_times): + assert len(activation.shape) == 3 + assert activation.shape[1] > multimodality_times + num_per_sent = activation.shape[1] + + first_dices = np.random.choice(num_per_sent, multimodality_times, replace=False) + second_dices = np.random.choice(num_per_sent, multimodality_times, replace=False) + dist = linalg.norm(activation[:, first_dices] - activation[:, second_dices], axis=2) + return dist.mean() + + +def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6): + """Numpy implementation of the Frechet Distance. + The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1) + and X_2 ~ N(mu_2, C_2) is + d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)). + Stable version by Dougal J. Sutherland. 
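A hedged sketch of how the metrics in this file are typically exercised (synthetic embeddings, sizes illustrative): near-matched text/motion pairs should give Top-k retrieval accuracy close to 1 and a Frechet distance close to 0.

import numpy as np
from mld.data.humanml.utils.metrics import (
    calculate_R_precision, calculate_activation_statistics, calculate_frechet_distance)

rng = np.random.default_rng(0)
text_emb = rng.normal(size=(256, 64))
motion_emb = text_emb + 0.01 * rng.normal(size=(256, 64))   # near-perfect matches

top_k = calculate_R_precision(text_emb, motion_emb, top_k=3, sum_all=True)
print(top_k / 256)                                          # per-k accuracy, close to [1., 1., 1.]

mu_gen, cov_gen = calculate_activation_statistics(motion_emb)
mu_gt, cov_gt = calculate_activation_statistics(text_emb)
print(calculate_frechet_distance(mu_gen, cov_gen, mu_gt, cov_gt))   # close to 0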
+ Params: + -- mu1 : Numpy array containing the activations of a layer of the + inception net (like returned by the function 'get_predictions') + for generated samples. + -- mu2 : The sample mean over activations, precalculated on a + representative dataset. + -- sigma1: The covariance matrix over activations for generated samples. + -- sigma2: The covariance matrix over activations, precalculated on a + representative dataset. + Returns: + -- : The Frechet Distance. + """ + + mu1 = np.atleast_1d(mu1) + mu2 = np.atleast_1d(mu2) + + sigma1 = np.atleast_2d(sigma1) + sigma2 = np.atleast_2d(sigma2) + + assert mu1.shape == mu2.shape, \ + 'Training and test mean vectors have different lengths' + assert sigma1.shape == sigma2.shape, \ + 'Training and test covariances have different dimensions' + + diff = mu1 - mu2 + + # Product might be almost singular + covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False) + if not np.isfinite(covmean).all(): + msg = ('fid calculation produces singular product; ' + 'adding %s to diagonal of cov estimates') % eps + print(msg) + offset = np.eye(sigma1.shape[0]) * eps + covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset)) + + # Numerical error might give slight imaginary component + if np.iscomplexobj(covmean): + if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3): + m = np.max(np.abs(covmean.imag)) + raise ValueError('Imaginary component {}'.format(m)) + covmean = covmean.real + + tr_covmean = np.trace(covmean) + + return (diff.dot(diff) + np.trace(sigma1) + + np.trace(sigma2) - 2 * tr_covmean) \ No newline at end of file diff --git a/Evaluator_272/mld/data/humanml/utils/paramUtil.py b/Evaluator_272/mld/data/humanml/utils/paramUtil.py new file mode 100644 index 0000000000000000000000000000000000000000..a9f1708b85ca80a9051cb3675cec9b999a0d0e2b --- /dev/null +++ b/Evaluator_272/mld/data/humanml/utils/paramUtil.py @@ -0,0 +1,63 @@ +import numpy as np + +# Define a kinematic tree for the skeletal structure +kit_kinematic_chain = [[0, 11, 12, 13, 14, 15], [0, 16, 17, 18, 19, 20], [0, 1, 2, 3, 4], [3, 5, 6, 7], [3, 8, 9, 10]] + +kit_raw_offsets = np.array( + [ + [0, 0, 0], + [0, 1, 0], + [0, 1, 0], + [0, 1, 0], + [0, 1, 0], + [1, 0, 0], + [0, -1, 0], + [0, -1, 0], + [-1, 0, 0], + [0, -1, 0], + [0, -1, 0], + [1, 0, 0], + [0, -1, 0], + [0, -1, 0], + [0, 0, 1], + [0, 0, 1], + [-1, 0, 0], + [0, -1, 0], + [0, -1, 0], + [0, 0, 1], + [0, 0, 1] + ] +) + +t2m_raw_offsets = np.array([[0,0,0], + [1,0,0], + [-1,0,0], + [0,1,0], + [0,-1,0], + [0,-1,0], + [0,1,0], + [0,-1,0], + [0,-1,0], + [0,1,0], + [0,0,1], + [0,0,1], + [0,1,0], + [1,0,0], + [-1,0,0], + [0,0,1], + [0,-1,0], + [0,-1,0], + [0,-1,0], + [0,-1,0], + [0,-1,0], + [0,-1,0]]) + +t2m_kinematic_chain = [[0, 2, 5, 8, 11], [0, 1, 4, 7, 10], [0, 3, 6, 9, 12, 15], [9, 14, 17, 19, 21], [9, 13, 16, 18, 20]] +t2m_left_hand_chain = [[20, 22, 23, 24], [20, 34, 35, 36], [20, 25, 26, 27], [20, 31, 32, 33], [20, 28, 29, 30]] +t2m_right_hand_chain = [[21, 43, 44, 45], [21, 46, 47, 48], [21, 40, 41, 42], [21, 37, 38, 39], [21, 49, 50, 51]] + + +kit_tgt_skel_id = '03950' + +t2m_tgt_skel_id = '000021' + diff --git a/Evaluator_272/mld/data/humanml/utils/plot_script.py b/Evaluator_272/mld/data/humanml/utils/plot_script.py new file mode 100644 index 0000000000000000000000000000000000000000..0118cdac276b6c330730196953a5510a8e72f786 --- /dev/null +++ b/Evaluator_272/mld/data/humanml/utils/plot_script.py @@ -0,0 +1,103 @@ +import math +# import cv2 +from textwrap import wrap + +import matplotlib +import 
matplotlib.pyplot as plt +import mpl_toolkits.mplot3d.axes3d as p3 +import numpy as np +from matplotlib.animation import FFMpegFileWriter, FuncAnimation +from mpl_toolkits.mplot3d import Axes3D +from mpl_toolkits.mplot3d.art3d import Poly3DCollection + +import mld.data.humanml.utils.paramUtil as paramUtil + +skeleton = paramUtil.t2m_kinematic_chain + + +def list_cut_average(ll, intervals): + if intervals == 1: + return ll + + bins = math.ceil(len(ll) * 1.0 / intervals) + ll_new = [] + for i in range(bins): + l_low = intervals * i + l_high = l_low + intervals + l_high = l_high if l_high < len(ll) else len(ll) + ll_new.append(np.mean(ll[l_low:l_high])) + return ll_new + + +def plot_3d_motion(save_path, joints, title, figsize=(3, 3), fps=120, radius=3, kinematic_tree=skeleton): + matplotlib.use('Agg') + title = '\n'.join(wrap(title, 20)) + + def init(): + ax.set_xlim3d([-radius / 2, radius / 2]) + ax.set_ylim3d([0, radius]) + ax.set_zlim3d([-radius / 3., radius * 2 / 3.]) + fig.suptitle(title, fontsize=10) + ax.grid(b=False) + + def plot_xzPlane(minx, maxx, miny, minz, maxz): + verts = [ + [minx, miny, minz], + [minx, miny, maxz], + [maxx, miny, maxz], + [maxx, miny, minz] + ] + xz_plane = Poly3DCollection([verts]) + xz_plane.set_facecolor((0.5, 0.5, 0.5, 0.5)) + ax.add_collection3d(xz_plane) + + + data = joints.copy().reshape(len(joints), -1, 3) + fig = plt.figure(figsize=figsize) + plt.tight_layout() + ax = p3.Axes3D(fig) + init() + MINS = data.min(axis=0).min(axis=0) + MAXS = data.max(axis=0).max(axis=0) + + colors = ["#DD5A37", "#D69E00", "#B75A39", "#DD5A37", "#D69E00", + "#FF6D00", "#FF6D00", "#FF6D00", "#FF6D00", "#FF6D00", + "#DDB50E", "#DDB50E", "#DDB50E", "#DDB50E", "#DDB50E", ] + + frame_number = data.shape[0] + + height_offset = MINS[1] + data[:, :, 1] -= height_offset + trajec = data[:, 0, [0, 2]] + + data[..., 0] -= data[:, 0:1, 0] + data[..., 2] -= data[:, 0:1, 2] + + + def update(index): + + ax.view_init(elev=120, azim=-90) + ax.dist = 7.5 + plot_xzPlane(MINS[0] - trajec[index, 0], MAXS[0] - trajec[index, 0], 0, MINS[2] - trajec[index, 1], + MAXS[2] - trajec[index, 1]) + + + for i, (chain, color) in enumerate(zip(kinematic_tree, colors)): + # print(color) + if i < 5: + linewidth = 4.0 + else: + linewidth = 2.0 + ax.plot3D(data[index, chain, 0], data[index, chain, 1], data[index, chain, 2], linewidth=linewidth, + color=color) + + plt.axis('off') + ax.set_xticklabels([]) + ax.set_yticklabels([]) + ax.set_zticklabels([]) + + ani = FuncAnimation(fig, update, frames=frame_number, + interval=1000 / fps, repeat=False) + + ani.save(save_path, fps=fps) + plt.close() diff --git a/Evaluator_272/mld/data/humanml/utils/utils.py b/Evaluator_272/mld/data/humanml/utils/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e7044995214e321a44b92f619e1039a5bb4fbe62 --- /dev/null +++ b/Evaluator_272/mld/data/humanml/utils/utils.py @@ -0,0 +1,163 @@ +import os +import numpy as np +# import cv2 +from PIL import Image +import paramUtil +import math +import time +import matplotlib.pyplot as plt +from scipy.ndimage import gaussian_filter + + +def mkdir(path): + if not os.path.exists(path): + os.makedirs(path) + +COLORS = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], + [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], + [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]] + +MISSING_VALUE = -1 + +def save_image(image_numpy, image_path): + img_pil = 
Image.fromarray(image_numpy) + img_pil.save(image_path) + + +def save_logfile(log_loss, save_path): + with open(save_path, 'wt') as f: + for k, v in log_loss.items(): + w_line = k + for digit in v: + w_line += ' %.3f' % digit + f.write(w_line + '\n') + + +def print_current_loss(start_time, niter_state, losses, epoch=None, sub_epoch=None, + inner_iter=None, tf_ratio=None, sl_steps=None): + + def as_minutes(s): + m = math.floor(s / 60) + s -= m * 60 + return '%dm %ds' % (m, s) + + def time_since(since, percent): + now = time.time() + s = now - since + es = s / percent + rs = es - s + return '%s (- %s)' % (as_minutes(s), as_minutes(rs)) + + if epoch is not None: + print('epoch: %3d niter: %6d sub_epoch: %2d inner_iter: %4d' % (epoch, niter_state, sub_epoch, inner_iter), end=" ") + + + now = time.time() + message = '%s'%(as_minutes(now - start_time)) + + for k, v in losses.items(): + message += ' %s: %.4f ' % (k, v) + message += ' sl_length:%2d tf_ratio:%.2f'%(sl_steps, tf_ratio) + print(message) + +def print_current_loss_decomp(start_time, niter_state, total_niters, losses, epoch=None, inner_iter=None): + + def as_minutes(s): + m = math.floor(s / 60) + s -= m * 60 + return '%dm %ds' % (m, s) + + def time_since(since, percent): + now = time.time() + s = now - since + es = s / percent + rs = es - s + return '%s (- %s)' % (as_minutes(s), as_minutes(rs)) + + print('epoch: %03d inner_iter: %5d' % (epoch, inner_iter), end=" ") + # now = time.time() + message = '%s niter: %07d completed: %3d%%)'%(time_since(start_time, niter_state / total_niters), niter_state, niter_state / total_niters * 100) + for k, v in losses.items(): + message += ' %s: %.4f ' % (k, v) + print(message) + + +def compose_gif_img_list(img_list, fp_out, duration): + img, *imgs = [Image.fromarray(np.array(image)) for image in img_list] + img.save(fp=fp_out, format='GIF', append_images=imgs, optimize=False, + save_all=True, loop=0, duration=duration) + + +def save_images(visuals, image_path): + if not os.path.exists(image_path): + os.makedirs(image_path) + + for i, (label, img_numpy) in enumerate(visuals.items()): + img_name = '%d_%s.jpg' % (i, label) + save_path = os.path.join(image_path, img_name) + save_image(img_numpy, save_path) + + +def save_images_test(visuals, image_path, from_name, to_name): + if not os.path.exists(image_path): + os.makedirs(image_path) + + for i, (label, img_numpy) in enumerate(visuals.items()): + img_name = "%s_%s_%s" % (from_name, to_name, label) + save_path = os.path.join(image_path, img_name) + save_image(img_numpy, save_path) + + +def compose_and_save_img(img_list, save_dir, img_name, col=4, row=1, img_size=(256, 200)): + # print(col, row) + compose_img = compose_image(img_list, col, row, img_size) + if not os.path.exists(save_dir): + os.makedirs(save_dir) + img_path = os.path.join(save_dir, img_name) + compose_img.save(img_path) + + +def compose_image(img_list, col, row, img_size): + to_image = Image.new('RGB', (col * img_size[0], row * img_size[1])) + for y in range(0, row): + for x in range(0, col): + from_img = Image.fromarray(img_list[y * col + x]) + + paste_area = (x * img_size[0], y*img_size[1], + (x + 1) * img_size[0], (y + 1) * img_size[1]) + to_image.paste(from_img, paste_area) + return to_image + + +def plot_loss_curve(losses, save_path, intervals=500): + plt.figure(figsize=(10, 5)) + plt.title("Loss During Training") + for key in losses.keys(): + plt.plot(list_cut_average(losses[key], intervals), label=key) + plt.xlabel("Iterations/" + str(intervals)) + plt.ylabel("Loss") + plt.legend() + 
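A short, hedged usage sketch for plot_3d_motion from plot_script.py above (`joints`, a (T, 22, 3) array, and the output path are illustrative; saving .mp4 assumes an ffmpeg writer is available to matplotlib):

from mld.data.humanml.utils.plot_script import plot_3d_motion

plot_3d_motion('demo.mp4', joints, title='a person walks forward', fps=30)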
plt.savefig(save_path) + plt.show() + + +def list_cut_average(ll, intervals): + if intervals == 1: + return ll + + bins = math.ceil(len(ll) * 1.0 / intervals) + ll_new = [] + for i in range(bins): + l_low = intervals * i + l_high = l_low + intervals + l_high = l_high if l_high < len(ll) else len(ll) + ll_new.append(np.mean(ll[l_low:l_high])) + return ll_new + + +def motion_temporal_filter(motion, sigma=1): + motion = motion.reshape(motion.shape[0], -1) + for i in range(motion.shape[1]): + motion[:, i] = gaussian_filter(motion[:, i], sigma=sigma, mode="nearest") + return motion.reshape(motion.shape[0], -1, 3) + diff --git a/Evaluator_272/mld/data/humanml/utils/word_vectorizer.py b/Evaluator_272/mld/data/humanml/utils/word_vectorizer.py new file mode 100644 index 0000000000000000000000000000000000000000..dc48321b4c8ba11607610ab1cb220fba23b9febf --- /dev/null +++ b/Evaluator_272/mld/data/humanml/utils/word_vectorizer.py @@ -0,0 +1,143 @@ +import numpy as np +import pickle +from os.path import join as pjoin + +POS_enumerator = { + 'VERB': 0, + 'NOUN': 1, + 'DET': 2, + 'ADP': 3, + 'NUM': 4, + 'AUX': 5, + 'PRON': 6, + 'ADJ': 7, + 'ADV': 8, + 'Loc_VIP': 9, + 'Body_VIP': 10, + 'Obj_VIP': 11, + 'Act_VIP': 12, + 'Desc_VIP': 13, + 'OTHER': 14, +} + +Loc_list = ('left', 'right', 'clockwise', 'counterclockwise', 'anticlockwise', 'forward', 'back', 'backward', + 'up', 'down', 'straight', 'curve') + +Body_list = ('arm', 'chin', 'foot', 'feet', 'face', 'hand', 'mouth', 'leg', 'waist', 'eye', 'knee', 'shoulder', 'thigh') + +Obj_List = ('stair', 'dumbbell', 'chair', 'window', 'floor', 'car', 'ball', 'handrail', 'baseball', 'basketball') + +Act_list = ('walk', 'run', 'swing', 'pick', 'bring', 'kick', 'put', 'squat', 'throw', 'hop', 'dance', 'jump', 'turn', + 'stumble', 'dance', 'stop', 'sit', 'lift', 'lower', 'raise', 'wash', 'stand', 'kneel', 'stroll', + 'rub', 'bend', 'balance', 'flap', 'jog', 'shuffle', 'lean', 'rotate', 'spin', 'spread', 'climb') + +Desc_list = ('slowly', 'carefully', 'fast', 'careful', 'slow', 'quickly', 'happy', 'angry', 'sad', 'happily', + 'angrily', 'sadly') + +VIP_dict = { + 'Loc_VIP': Loc_list, + 'Body_VIP': Body_list, + 'Obj_VIP': Obj_List, + 'Act_VIP': Act_list, + 'Desc_VIP': Desc_list, +} + + +class WordVectorizer(object): + def __init__(self, meta_root, prefix, text_encode_way): + + self.text_encode_way = text_encode_way + + vectors = np.load(pjoin(meta_root, '%s_data.npy'%prefix)) + words = pickle.load(open(pjoin(meta_root, '%s_words.pkl'%prefix), 'rb')) + word2idx = pickle.load(open(pjoin(meta_root, '%s_idx.pkl'%prefix), 'rb')) + self.word2vec = {w: vectors[word2idx[w]] for w in words} + + if 'glove_6B' in self.text_encode_way: + from torchtext.vocab import GloVe + glove_6b = GloVe(name='6B', dim=300) + self.word2vec_glove_6b = glove_6b.get_vecs_by_tokens + + def _get_pos_ohot(self, pos): + pos_vec = np.zeros(len(POS_enumerator)) + if pos in POS_enumerator: + pos_vec[POS_enumerator[pos]] = 1 + else: + pos_vec[POS_enumerator['OTHER']] = 1 + return pos_vec + + def __len__(self): + return len(self.word2vec) + + def __getitem__(self, item): + word, pos = item.split('/') + if 'given_glove' in self.text_encode_way: + if word in self.word2vec: + word_vec = self.word2vec[word] + vip_pos = None + for key, values in VIP_dict.items(): + if word in values: + vip_pos = key + break + if vip_pos is not None: + pos_vec = self._get_pos_ohot(vip_pos) + else: + pos_vec = self._get_pos_ohot(pos) + else: + word_vec = self.word2vec['unk'] + pos_vec = self._get_pos_ohot('OTHER') + + elif 'glove_6B' 
in self.text_encode_way: + word_vec = self.word2vec_glove_6b([word]).squeeze() + + if word in self.word2vec: + vip_pos = None + for key, values in VIP_dict.items(): + if word in values: + vip_pos = key + break + if vip_pos is not None: + pos_vec = self._get_pos_ohot(vip_pos) + else: + pos_vec = self._get_pos_ohot(pos) + else: + pos_vec = self._get_pos_ohot('OTHER') + + + + return word_vec, pos_vec + +class WordVectorizer_only_text_token(object): + def __init__(self, meta_root, prefix, text_encode_way): + + self.text_encode_way = text_encode_way + + vectors = np.load(pjoin(meta_root, '%s_data.npy'%prefix)) + words = pickle.load(open(pjoin(meta_root, '%s_words.pkl'%prefix), 'rb')) + word2idx = pickle.load(open(pjoin(meta_root, '%s_idx.pkl'%prefix), 'rb')) + self.word2vec = {w: vectors[word2idx[w]] for w in words} + + if 'glove_6B' in self.text_encode_way: + from torchtext.vocab import GloVe + glove_6b = GloVe(name='6B', dim=300) + self.word2vec_glove_6b = glove_6b.get_vecs_by_tokens + + def __len__(self): + return len(self.word2vec) + + def __getitem__(self, item): + word = item + + if 'given_glove' in self.text_encode_way: + if word in self.word2vec: + word_vec = self.word2vec[word] + else: + word_vec = self.word2vec['unk'] + + elif 'glove_6B' in self.text_encode_way: + word_vec = self.word2vec_glove_6b([word]).squeeze() + + return word_vec + + + diff --git a/Evaluator_272/mld/data/sampling/__init__.py b/Evaluator_272/mld/data/sampling/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8c9d0bea9d4a507a43e240a3644bdedf83a107e4 --- /dev/null +++ b/Evaluator_272/mld/data/sampling/__init__.py @@ -0,0 +1,2 @@ +from .base import FrameSampler +from .framerate import subsample, upsample diff --git a/Evaluator_272/mld/data/sampling/base.py b/Evaluator_272/mld/data/sampling/base.py new file mode 100644 index 0000000000000000000000000000000000000000..0ab6a8a0fb9fa41d4a586d392a4722f55e34e689 --- /dev/null +++ b/Evaluator_272/mld/data/sampling/base.py @@ -0,0 +1,41 @@ +from .frames import get_frameix_from_data_index + +class FrameSampler: + def __init__(self, sampling="conseq", sampling_step=1, request_frames=None,threshold_reject=0.75,max_len=1000,min_len=10): + self.sampling = sampling + + self.sampling_step = sampling_step + self.request_frames = request_frames + self.threshold_reject = threshold_reject + self.max_len = max_len + self.min_len = min_len + + def __call__(self, num_frames): + + return get_frameix_from_data_index(num_frames, + self.request_frames, + self.sampling, + self.sampling_step) + + def accept(self, duration): + # Outputs have original lengths + # Check if it is too long + if self.request_frames is None: + if duration > self.max_len: + return False + elif duration < self.min_len: + return False + else: + # Reject sample if the length is + # too little relative to + # the request frames + min_number = self.threshold_reject * self.request_frames + if duration < min_number: + return False + return True + + def get(self, key, default=None): + return getattr(self, key, default) + + def __getitem__(self, key): + return getattr(self, key) diff --git a/Evaluator_272/mld/data/sampling/framerate.py b/Evaluator_272/mld/data/sampling/framerate.py new file mode 100644 index 0000000000000000000000000000000000000000..72dd08f0ff7e2fedfab55c9d04393a740aaab54f --- /dev/null +++ b/Evaluator_272/mld/data/sampling/framerate.py @@ -0,0 +1,32 @@ +import numpy as np + +def subsample(num_frames, last_framerate, new_framerate): + step = int(last_framerate / new_framerate) + 
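A sketch of the FrameSampler defined in base.py above (values illustrative): clips are first filtered by accept, then reduced to a fixed number of frame indices.

from mld.data.sampling import FrameSampler

sampler = FrameSampler(sampling="conseq", sampling_step=1, request_frames=64)
if sampler.accept(180):        # clips shorter than 0.75 * 64 frames are rejected
    frame_ix = sampler(180)    # 64 consecutive indices at a random offset into the clip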
assert step >= 1 + frames = np.arange(0, num_frames, step) + return frames + + + +def upsample(motion, last_framerate, new_framerate): + step = int(new_framerate / last_framerate) + assert step >= 1 + + # Alpha blending => interpolation + alpha = np.linspace(0, 1, step+1) + last = np.einsum("l,...->l...", 1-alpha, motion[:-1]) + new = np.einsum("l,...->l...", alpha, motion[1:]) + + chuncks = (last + new)[:-1] + output = np.concatenate(chuncks.swapaxes(1, 0)) + # Don't forget the last one + output = np.concatenate((output, motion[[-1]])) + return output + + +if __name__ == "__main__": + motion = np.arange(105) + submotion = motion[subsample(len(motion), 100.0, 12.5)] + newmotion = upsample(submotion, 12.5, 100) + + print(newmotion) diff --git a/Evaluator_272/mld/data/sampling/frames.py b/Evaluator_272/mld/data/sampling/frames.py new file mode 100644 index 0000000000000000000000000000000000000000..ab9a6ed47987d5d04651fa153f54741ad5442e64 --- /dev/null +++ b/Evaluator_272/mld/data/sampling/frames.py @@ -0,0 +1,58 @@ +from typing import Optional + +import numpy as np +from numpy import ndarray as Array +import random + + +def get_frameix_from_data_index(num_frames: int, + request_frames: Optional[int], + sampling: str = "conseq", + sampling_step: int = 1) -> Array: + nframes = num_frames + + if request_frames is None: + frame_ix = np.arange(nframes) + else: + + if request_frames > nframes: + fair = False # True + if fair: + # distills redundancy everywhere + choices = np.random.choice(range(nframes), + request_frames, + replace=True) + frame_ix = sorted(choices) + else: + # adding the last frame until done + ntoadd = max(0, request_frames - nframes) + lastframe = nframes - 1 + padding = lastframe * np.ones(ntoadd, dtype=int) + frame_ix = np.concatenate((np.arange(0, nframes), + padding)) + + elif sampling in ["conseq", "random_conseq"]: + step_max = (nframes - 1) // (request_frames - 1) + if sampling == "conseq": + if sampling_step == -1 or sampling_step * (request_frames - 1) >= nframes: + step = step_max + else: + step = sampling_step + elif sampling == "random_conseq": + step = random.randint(1, step_max) + + lastone = step * (request_frames - 1) + shift_max = nframes - lastone - 1 + shift = random.randint(0, max(0, shift_max - 1)) + frame_ix = shift + np.arange(0, lastone + 1, step) + + elif sampling == "random": + choices = np.random.choice(range(nframes), + request_frames, + replace=False) + frame_ix = sorted(choices) + + else: + raise ValueError("Sampling not recognized.") + + return frame_ix diff --git a/Evaluator_272/mld/data/utils.py b/Evaluator_272/mld/data/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..b16acd6ba8b8eaafdba4cde0765f1bc812c25392 --- /dev/null +++ b/Evaluator_272/mld/data/utils.py @@ -0,0 +1,38 @@ +import torch + + +def lengths_to_mask(lengths): + max_len = max(lengths) + mask = torch.arange(max_len, device=lengths.device).expand( + len(lengths), max_len) < lengths.unsqueeze(1) + return mask + + +def collate_tensors(batch): + dims = batch[0].dim() + max_size = [max([b.size(i) for b in batch]) for i in range(dims)] + size = (len(batch), ) + tuple(max_size) + canvas = batch[0].new_zeros(size=size) + for i, b in enumerate(batch): + sub_tensor = canvas[i] + for d in range(dims): + sub_tensor = sub_tensor.narrow(d, 0, b.size(d)) + sub_tensor.add_(b) + return canvas + +def mld_collate(batch): + notnone_batches = [b for b in batch if b is not None] + notnone_batches.sort(key=lambda x: x[2], reverse=True) + adapted_batch = { + "motion": + 
collate_tensors([torch.tensor(b[1]).float() for b in notnone_batches]), + "text": [b[0] for b in notnone_batches], + "length": [b[2] for b in notnone_batches], + "retrieval_name": [b[3] for b in notnone_batches] + } + return adapted_batch + + + + + diff --git a/Evaluator_272/mld/launch/__init__.py b/Evaluator_272/mld/launch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Evaluator_272/mld/launch/blender.py b/Evaluator_272/mld/launch/blender.py new file mode 100644 index 0000000000000000000000000000000000000000..cad6c9daa0461ee0bfecc5f76a40def4101d837a --- /dev/null +++ b/Evaluator_272/mld/launch/blender.py @@ -0,0 +1,23 @@ +# Fix blender path +import sys +import os +# local packages +sys.path.append(os.path.expanduser("~/.local/lib/python3.9/site-packages")) +import bpy +import os +from argparse import ArgumentParser + +# Monkey patch argparse such that +# blender / python / hydra parsing works +def parse_args(self, args=None, namespace=None): + if args is not None: + return self.parse_args_bak(args=args, namespace=namespace) + try: + idx = sys.argv.index("--") + args = sys.argv[idx+1:] # the list after '--' + except ValueError as e: # '--' not in the list: + args = [] + return self.parse_args_bak(args=args, namespace=namespace) + +setattr(ArgumentParser, 'parse_args_bak', ArgumentParser.parse_args) +setattr(ArgumentParser, 'parse_args', parse_args) diff --git a/Evaluator_272/mld/launch/prepare.py b/Evaluator_272/mld/launch/prepare.py new file mode 100644 index 0000000000000000000000000000000000000000..a9934211edb0bce0222ff74d53df6ae0fe6fa72e --- /dev/null +++ b/Evaluator_272/mld/launch/prepare.py @@ -0,0 +1,66 @@ +import os +import warnings +from pathlib import Path + +import hydra +from mld.tools.runid import generate_id +from omegaconf import OmegaConf + + +# Local paths +def code_path(path=""): + code_dir = hydra.utils.get_original_cwd() + code_dir = Path(code_dir) + return str(code_dir / path) + + +def working_path(path): + return str(Path(os.getcwd()) / path) + + +# fix the id for this run +ID = generate_id() + + +def generate_id(): + return ID + + +def get_last_checkpoint(path, ckpt_name="last.ckpt"): + output_dir = Path(hydra.utils.to_absolute_path(path)) + last_ckpt_path = output_dir / "checkpoints" / ckpt_name + return str(last_ckpt_path) + + +def get_kitname(load_amass_data: bool, load_with_rot: bool): + if not load_amass_data: + return "kit-mmm-xyz" + if load_amass_data and not load_with_rot: + return "kit-amass-xyz" + if load_amass_data and load_with_rot: + return "kit-amass-rot" + + +OmegaConf.register_new_resolver("code_path", code_path) +OmegaConf.register_new_resolver("working_path", working_path) +OmegaConf.register_new_resolver("generate_id", generate_id) +OmegaConf.register_new_resolver("absolute_path", hydra.utils.to_absolute_path) +OmegaConf.register_new_resolver("get_last_checkpoint", get_last_checkpoint) +OmegaConf.register_new_resolver("get_kitname", get_kitname) + + +# Remove warnings +warnings.filterwarnings( + "ignore", ".*Trying to infer the `batch_size` from an ambiguous collection.*" +) + +warnings.filterwarnings( + "ignore", ".*does not have many workers which may be a bottleneck*" +) + +warnings.filterwarnings( + "ignore", ".*Our suggested max number of worker in current system is*" +) + + +os.environ["NUMEXPR_MAX_THREADS"] = "24" diff --git a/Evaluator_272/mld/launch/tools.py b/Evaluator_272/mld/launch/tools.py new file mode 100644 index 
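A sketch of the two collation helpers above (shapes illustrative): variable-length motions are zero-padded to a common length, with a boolean mask marking the valid frames.

import torch
from mld.data.utils import collate_tensors, lengths_to_mask

a, b = torch.ones(5, 272), torch.ones(3, 272)    # two motions: 5 and 3 frames of 272-dim features
batch = collate_tensors([a, b])                  # (2, 5, 272); b is zero-padded to 5 frames
mask = lengths_to_mask(torch.tensor([5, 3]))     # (2, 5) bool; mask[1, 3:] is False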
0000000000000000000000000000000000000000..119185262a35d89a4c99bc265dbccda554200d82 --- /dev/null +++ b/Evaluator_272/mld/launch/tools.py @@ -0,0 +1,9 @@ +from pathlib import Path +from omegaconf import DictConfig, OmegaConf +import hydra +import os + + +def resolve_cfg_path(cfg: DictConfig): + working_dir = os.getcwd() + cfg.working_dir = working_dir diff --git a/Evaluator_272/mld/models/__init__.py b/Evaluator_272/mld/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Evaluator_272/mld/models/architectures/__init__.py b/Evaluator_272/mld/models/architectures/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Evaluator_272/mld/models/architectures/actor_vae.py b/Evaluator_272/mld/models/architectures/actor_vae.py new file mode 100644 index 0000000000000000000000000000000000000000..be712ca758a22a6d0a387c8434620c8ca13a294a --- /dev/null +++ b/Evaluator_272/mld/models/architectures/actor_vae.py @@ -0,0 +1,258 @@ +from typing import List, Optional, Union +import numpy as np +import torch +import torch.nn as nn +from torch import Tensor, nn +from torch.distributions.distribution import Distribution +from mld.utils.temos_utils import lengths_to_mask +from mld.models.operator import PositionalEncoding + + +class ActorVae(nn.Module): + + def __init__(self, + ablation, + nfeats: int, + latent_dim: list = [1, 256], + ff_size: int = 1024, + num_layers: int = 9, + num_heads: int = 4, + dropout: float = 0.1, + is_vae: bool = True, + activation: str = "gelu", + position_embedding: str = "learned", + **kwargs) -> None: + + super().__init__() + + self.latent_size = latent_dim[0] + self.latent_dim = latent_dim[-1] + self.is_vae = is_vae + input_feats = nfeats + output_feats = nfeats + + self.encoder = ActorAgnosticEncoder(nfeats=input_feats, + vae=True, + latent_dim=self.latent_dim, + ff_size=ff_size, + num_layers=num_layers, + num_heads=num_heads, + dropout=dropout, + activation=activation, + **kwargs) + + self.decoder = ActorAgnosticDecoder(nfeats=output_feats, + vae=True, + latent_dim=self.latent_dim, + ff_size=ff_size, + num_layers=num_layers, + num_heads=num_heads, + dropout=dropout, + activation=activation, + **kwargs) + + def forward(self, features: Tensor, lengths: Optional[List[int]] = None): + # Temp + # Todo + # remove and test this function + print("Should Not enter here") + + z, dist = self.encode(features, lengths) + feats_rst = self.decode(z, lengths) + return feats_rst, z, dist + + def encode( + self, + features: Tensor, + lengths: Optional[List[int]] = None + ) -> Union[Tensor, Distribution]: + + dist = self.encoder(features, lengths) + if self.is_vae: + latent = sample_from_distribution(dist) + else: + latent = dist.unsqueeze(0) + + return latent, dist + + def decode(self, z: Tensor, lengths: List[int]): + + feats = self.decoder(z, lengths) + return feats + + +class ActorAgnosticEncoder(nn.Module): + + def __init__(self, + nfeats: int, + vae: bool, + latent_dim: int = 256, + ff_size: int = 1024, + num_layers: int = 4, + num_heads: int = 4, + dropout: float = 0.1, + activation: str = "gelu", + **kwargs) -> None: + super().__init__() + + input_feats = nfeats + self.vae = vae + self.skel_embedding = nn.Linear(input_feats, latent_dim) + + # Action agnostic: only one set of params + if vae: + self.mu_token = nn.Parameter(torch.randn(latent_dim)) + self.logvar_token = nn.Parameter(torch.randn(latent_dim)) + else: + 
self.emb_token = nn.Parameter(torch.randn(latent_dim)) + + self.sequence_pos_encoding = PositionalEncoding(latent_dim, dropout) + + seq_trans_encoder_layer = nn.TransformerEncoderLayer( + d_model=latent_dim, + nhead=num_heads, + dim_feedforward=ff_size, + dropout=dropout, + activation=activation) + + self.seqTransEncoder = nn.TransformerEncoder(seq_trans_encoder_layer, + num_layers=num_layers) + + def forward( + self, + features: Tensor, + lengths: Optional[List[int]] = None + ) -> Union[Tensor, Distribution]: + if lengths is None: + lengths = [len(feature) for feature in features] + + device = features.device + + bs, nframes, nfeats = features.shape + mask = lengths_to_mask(lengths, device) + + x = features + # Embed each human poses into latent vectors + x = self.skel_embedding(x) + + # Switch sequence and batch_size because the input of + # Pytorch Transformer is [Sequence, Batch size, ...] + x = x.permute(1, 0, 2) # now it is [nframes, bs, latent_dim] + + # Each batch has its own set of tokens + if self.vae: + mu_token = torch.tile(self.mu_token, (bs, )).reshape(bs, -1) + logvar_token = torch.tile(self.logvar_token, + (bs, )).reshape(bs, -1) + + # adding the distribution tokens for all sequences + xseq = torch.cat((mu_token[None], logvar_token[None], x), 0) + + # create a bigger mask, to allow attend to mu and logvar + token_mask = torch.ones((bs, 2), dtype=bool, device=x.device) + aug_mask = torch.cat((token_mask, mask), 1) + else: + emb_token = torch.tile(self.emb_token, (bs, )).reshape(bs, -1) + + # adding the embedding token for all sequences + xseq = torch.cat((emb_token[None], x), 0) + + # create a bigger mask, to allow attend to emb + token_mask = torch.ones((bs, 1), dtype=bool, device=x.device) + aug_mask = torch.cat((token_mask, mask), 1) + + # add positional encoding + xseq = self.sequence_pos_encoding(xseq) + final = self.seqTransEncoder(xseq, src_key_padding_mask=~aug_mask) + + if self.vae: + mu, logvar = final[0], final[1] + std = logvar.exp().pow(0.5) + # https://github.com/kampta/pytorch-distributions/blob/master/gaussian_vae.py + dist = torch.distributions.Normal(mu, std) + return dist + else: + return final[0] + + +class ActorAgnosticDecoder(nn.Module): + + def __init__(self, + nfeats: int, + latent_dim: int = 256, + ff_size: int = 1024, + num_layers: int = 4, + num_heads: int = 4, + dropout: float = 0.1, + activation: str = "gelu", + **kwargs) -> None: + super().__init__() + + output_feats = nfeats + self.latent_dim = latent_dim + self.nfeats = nfeats + + self.sequence_pos_encoding = PositionalEncoding(latent_dim, dropout) + + seq_trans_decoder_layer = nn.TransformerDecoderLayer( + d_model=latent_dim, + nhead=num_heads, + dim_feedforward=ff_size, + dropout=dropout, + activation=activation) + + self.seqTransDecoder = nn.TransformerDecoder(seq_trans_decoder_layer, + num_layers=num_layers) + + self.final_layer = nn.Linear(latent_dim, output_feats) + + def forward(self, z: Tensor, lengths: List[int]): + mask = lengths_to_mask(lengths, z.device) + # latent_dim = z.shape[1] + bs, nframes = mask.shape + nfeats = self.nfeats + + # z = z[None] # sequence of 1 element for the memory + + # Construct time queries + time_queries = torch.zeros(nframes, + bs, + self.latent_dim, + device=z.device) + time_queries = self.sequence_pos_encoding(time_queries) + + # Pass through the transformer decoder + # with the latent vector for memory + output = self.seqTransDecoder(tgt=time_queries, + memory=z, + tgt_key_padding_mask=~mask) + + output = self.final_layer(output) + # zero for padded 
area + output[~mask.T] = 0 + # Pytorch Transformer: [Sequence, Batch size, ...] + feats = output.permute(1, 0, 2) + return feats + + +def sample_from_distribution( + dist, + *, + fact=1.0, + sample_mean=False, +) -> Tensor: + + if sample_mean: + return dist.loc.unsqueeze(0) + + # Reparameterization trick + if fact is None: + return dist.rsample().unsqueeze(0) + + # Rescale the eps + eps = dist.rsample() - dist.loc + z = dist.loc + fact * eps + + # add latent size + z = z.unsqueeze(0) + return z diff --git a/Evaluator_272/mld/models/architectures/fc.py b/Evaluator_272/mld/models/architectures/fc.py new file mode 100644 index 0000000000000000000000000000000000000000..91380acf73428b711bfcf1d19a63c16b61270c36 --- /dev/null +++ b/Evaluator_272/mld/models/architectures/fc.py @@ -0,0 +1,100 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class Encoder_FC(nn.Module): + def __init__(self, modeltype, njoints, nfeats, num_frames, num_classes, translation, pose_rep, glob, glob_rot, + latent_dim=256, **kargs): + super().__init__() + + self.modeltype = modeltype + self.njoints = njoints + self.nfeats = nfeats + self.num_frames = num_frames + self.num_classes = num_classes + self.translation = translation + self.pose_rep = pose_rep + self.glob = glob + self.glob_rot = glob_rot + + self.latent_dim = latent_dim + + self.activation = nn.GELU() + + self.input_dim = self.njoints*self.nfeats*self.num_frames+self.num_classes + + self.fully_connected = nn.Sequential(nn.Linear(self.input_dim, 512), + nn.GELU(), + nn.Linear(512, 256), + nn.GELU()) + if self.modeltype == "cvae": + self.mu = nn.Linear(256, self.latent_dim) + self.var = nn.Linear(256, self.latent_dim) + else: + self.final = nn.Linear(256, self.latent_dim) + + def forward(self, batch): + x, y = batch["x"], batch["y"] + bs, njoints, feats, nframes = x.size() + if (njoints * feats * nframes) != self.njoints*self.nfeats*self.num_frames: + raise ValueError("This model is not adapted to this input") + + if len(y.shape) == 1: # labels can be given one-hot encoded or as class indices + y = F.one_hot(y, self.num_classes) + y = y.to(dtype=x.dtype) + x = x.reshape(bs, njoints*feats*nframes) + x = torch.cat((x, y), 1) + + x = self.fully_connected(x) + + if self.modeltype == "cvae": + return {"mu": self.mu(x), "logvar": self.var(x)} + else: + return {"z": self.final(x)} + + +class Decoder_FC(nn.Module): + def __init__(self, modeltype, njoints, nfeats, num_frames, num_classes, translation, pose_rep, glob, glob_rot, + latent_dim=256, **kargs): + super().__init__() + + self.modeltype = modeltype + self.njoints = njoints + self.nfeats = nfeats + self.num_frames = num_frames + self.num_classes = num_classes + self.translation = translation + self.pose_rep = pose_rep + self.glob = glob + self.glob_rot = glob_rot + + self.latent_dim = latent_dim + + self.input_dim = self.latent_dim + self.num_classes + self.output_dim = self.njoints*self.nfeats*self.num_frames + + self.fully_connected = nn.Sequential(nn.Linear(self.input_dim, 256), + nn.GELU(), + nn.Linear(256, 512), + nn.GELU(), + nn.Linear(512, self.output_dim), + nn.GELU()) + + def forward(self, batch): + z, y = batch["z"], batch["y"] + # z: [batch_size, latent_dim] + # y: [batch_size] + if len(y.shape) == 1: # labels can be given one-hot encoded or as class indices + y = F.one_hot(y, self.num_classes) + y = y.to(dtype=z.dtype) # y: [batch_size, num_classes] + # z: [batch_size, latent_dim+num_classes] + z = torch.cat((z, y), dim=1) + + z = self.fully_connected(z) + + bs, _ = z.size() + + z = z.reshape(bs, self.njoints,
self.nfeats, self.num_frames) + batch["output"] = z + return batch diff --git a/Evaluator_272/mld/models/architectures/gpt/clip.py b/Evaluator_272/mld/models/architectures/gpt/clip.py new file mode 100644 index 0000000000000000000000000000000000000000..270b6d0ef5db571a142795e0159326871b482f9c --- /dev/null +++ b/Evaluator_272/mld/models/architectures/gpt/clip.py @@ -0,0 +1,90 @@ +import os +from typing import List, Union + +import torch +from torch import Tensor, nn +from torch.distributions.distribution import Distribution +from transformers import AutoModel, AutoTokenizer, CLIPTextModel, CLIPTokenizer + +from mld.models.operator import PositionalEncoding +from mld.utils.temos_utils import lengths_to_mask + +import pytorch_lightning as pl +class TextEncoder(pl.LightningModule): + + def __init__( + self, + modelpath: str, + finetune: bool = False, + last_hidden_state: bool = False, + latent_dim: list = [1, 256], + ) -> None: + + super().__init__() + + self.latent_dim = latent_dim + + self.tokenizer = AutoTokenizer.from_pretrained(modelpath) + self.text_model = AutoModel.from_pretrained(modelpath) + + # Don't train the model + if not finetune: + self.text_model.training = False + for p in self.text_model.parameters(): + p.requires_grad = False + + # Then configure the model + self.max_length = self.tokenizer.model_max_length + if "clip" in modelpath: + self.text_encoded_dim = self.text_model.config.text_config.hidden_size + if last_hidden_state: + self.name = "clip_hidden" + else: + self.name = "clip" + elif "bert" in modelpath: + self.name = "bert" + self.text_encoded_dim = self.text_model.config.hidden_size + else: + raise ValueError(f"Model {modelpath} not supported") + + def forward(self, texts: List[str]): + # get prompt text embeddings + if self.name in ["clip", "clip_hidden"]: + text_inputs = self.tokenizer( + texts, + padding="max_length", + truncation=True, + max_length=self.max_length, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + # split into max length Clip can handle + if text_input_ids.shape[-1] > self.tokenizer.model_max_length: + text_input_ids = text_input_ids[:, :self.tokenizer. 
+ model_max_length] + elif self.name == "bert": + text_inputs = self.tokenizer(texts, + return_tensors="pt", + padding=True) + + # use pooled output if latent dim is two-dimensional + # pooled = 0 if self.latent_dim[0] == 1 else 1 # (bs, seq_len, text_encoded_dim) -> (bs, text_encoded_dim) + # text encoder forward, clip must use get_text_features + if self.name == "clip": + # (batch_size, text_encoded_dim) + text_embeddings = self.text_model.get_text_features( + text_input_ids.to(self.text_model.device)) + # (batch_size, 1, text_encoded_dim) + text_embeddings = text_embeddings.unsqueeze(1) + elif self.name == "clip_hidden": + # (batch_size, seq_length, text_encoded_dim) + text_embeddings = self.text_model.text_model( + text_input_ids.to(self.text_model.device)).last_hidden_state + elif self.name == "bert": + # (batch_size, seq_length, text_encoded_dim) + text_embeddings = self.text_model( + **text_inputs.to(self.text_model.device)).last_hidden_state + else: + raise NotImplementedError(f"Model {self.name} not implemented") + + return text_embeddings diff --git a/Evaluator_272/mld/models/architectures/gpt/pos_encoding.py b/Evaluator_272/mld/models/architectures/gpt/pos_encoding.py new file mode 100644 index 0000000000000000000000000000000000000000..066be3e1f8a1636f7eaabd1c534b9c618ee3e9f8 --- /dev/null +++ b/Evaluator_272/mld/models/architectures/gpt/pos_encoding.py @@ -0,0 +1,43 @@ +""" +Various positional encodings for the transformer. +""" +import math +import torch +from torch import nn + +def PE1d_sincos(seq_length, dim): + """ + :param seq_length: length of positions + :param dim: dimension of the model + :return: seq_length*dim position matrix + """ + if dim % 2 != 0: + raise ValueError("Cannot use sin/cos positional encoding with " + "odd dim (got dim={:d})".format(dim)) + pe = torch.zeros(seq_length, dim) + position = torch.arange(0, seq_length).unsqueeze(1) + div_term = torch.exp((torch.arange(0, dim, 2, dtype=torch.float) * + -(math.log(10000.0) / dim))) + pe[:, 0::2] = torch.sin(position.float() * div_term) + pe[:, 1::2] = torch.cos(position.float() * div_term) + + return pe.unsqueeze(1) + + +class PositionEmbedding(nn.Module): + """ + Absolute positional embedding (standard): sinusoidal by default, learned when grad=True.
+ """ + def __init__(self, seq_length, dim, dropout, grad=False): + super().__init__() + self.embed = nn.Parameter(data=PE1d_sincos(seq_length, dim), requires_grad=grad) + self.dropout = nn.Dropout(p=dropout) + + def forward(self, x): + # x.shape: bs, seq_len, feat_dim + l = x.shape[1] + x = x.permute(1, 0, 2) + self.embed[:l].expand(x.permute(1, 0, 2).shape) + x = self.dropout(x.permute(1, 0, 2)) + return x + + \ No newline at end of file diff --git a/Evaluator_272/mld/models/architectures/gpt/t2m_trans.py b/Evaluator_272/mld/models/architectures/gpt/t2m_trans.py new file mode 100644 index 0000000000000000000000000000000000000000..10f9a0f24ac93f72602e7c65e8c7fe6c6778ddfe --- /dev/null +++ b/Evaluator_272/mld/models/architectures/gpt/t2m_trans.py @@ -0,0 +1,265 @@ +import math +import torch +import torch.nn as nn +from torch.nn import functional as F +from torch.distributions import Categorical +import mld.models.architectures.gpt.pos_encoding as pos_encoding +import random + +class AttentionPool2d(nn.Module): + def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None): + super().__init__() + self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5) + self.k_proj = nn.Linear(embed_dim, embed_dim) + self.q_proj = nn.Linear(embed_dim, embed_dim) + self.v_proj = nn.Linear(embed_dim, embed_dim) + self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim) + self.num_heads = num_heads + + def forward(self, x): + x = x.flatten(start_dim=2).permute(2, 0, 1) + x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) + x = x + self.positional_embedding[:, None, :].to(x.dtype) + x, _ = F.multi_head_attention_forward( + query=x[:1], key=x, value=x, + embed_dim_to_check=x.shape[-1], + num_heads=self.num_heads, + q_proj_weight=self.q_proj.weight, + k_proj_weight=self.k_proj.weight, + v_proj_weight=self.v_proj.weight, + in_proj_weight=None, + in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]), + bias_k=None, + bias_v=None, + add_zero_attn=False, + dropout_p=0, + out_proj_weight=self.c_proj.weight, + out_proj_bias=self.c_proj.bias, + use_separate_proj_weight=True, + training=self.training, + need_weights=False + ) + return x.squeeze(0) + +class Text2Motion_Transformer(nn.Module): + + def __init__(self, + num_vq=1024, + embed_dim=512, + clip_dim=512, + block_size=16, + num_layers=2, + n_head=8, + drop_out_rate=0.1, + fc_rate=4): + super().__init__() + self.trans_base = CrossCondTransBase(num_vq, embed_dim, clip_dim, block_size, num_layers, n_head, drop_out_rate, fc_rate) + self.trans_head = CrossCondTransHead(num_vq, embed_dim, block_size, num_layers, n_head, drop_out_rate, fc_rate) + self.block_size = block_size + self.num_vq = num_vq + + def get_block_size(self): + return self.block_size + + def forward(self, idxs, clip_feature): + ''' + Input: + idx: [32, 50] + clip_feature: [32, 768] + + Output: + logits: (32, 51, 513) + ''' + feat = self.trans_base(idxs, clip_feature) + logits = self.trans_head(feat) + return logits + + def sample(self, clip_feature, if_categorial=False): + for k in range(self.block_size): + if k == 0: + x = [] + else: + x = xs + logits = self.forward(x, clip_feature) + logits = logits[:, -1, :] + probs = F.softmax(logits, dim=-1) + if if_categorial: + dist = Categorical(probs) + idx = dist.sample() + if idx == self.num_vq: + break + idx = idx.unsqueeze(-1) + else: + _, idx = torch.topk(probs, k=1, dim=-1) + if idx[0] == self.num_vq: + break + # append to the sequence and 
continue + if k == 0: + xs = idx + else: + xs = torch.cat((xs, idx), dim=1) + + if k == self.block_size - 1: + return xs[:, :-1] + + + return xs + + + + +class CausalCrossConditionalSelfAttention(nn.Module): + + def __init__(self, embed_dim=512, block_size=16, n_head=8, drop_out_rate=0.1): + super().__init__() + assert embed_dim % 8 == 0 + # key, query, value projections for all heads + self.key = nn.Linear(embed_dim, embed_dim) + self.query = nn.Linear(embed_dim, embed_dim) + self.value = nn.Linear(embed_dim, embed_dim) + + self.attn_drop = nn.Dropout(drop_out_rate) + self.resid_drop = nn.Dropout(drop_out_rate) + + self.proj = nn.Linear(embed_dim, embed_dim) + # causal mask to ensure that attention is only applied to the left in the input sequence + self.register_buffer("mask", torch.tril(torch.ones(block_size, block_size)).view(1, 1, block_size, block_size)) + self.n_head = n_head + + def forward(self, x): + B, T, C = x.size() + + # calculate query, key, values for all heads in batch and move head forward to be the batch dim + k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) + q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) + v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) + # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T) + att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1))) + att = att.masked_fill(self.mask[:,:,:T,:T] == 0, float('-inf')) + att = F.softmax(att, dim=-1) + att = self.attn_drop(att) + y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs) + y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side + + # output projection + y = self.resid_drop(self.proj(y)) + return y + +class Block(nn.Module): + + def __init__(self, embed_dim=512, block_size=16, n_head=8, drop_out_rate=0.1, fc_rate=4): + super().__init__() + self.ln1 = nn.LayerNorm(embed_dim) + self.ln2 = nn.LayerNorm(embed_dim) + self.attn = CausalCrossConditionalSelfAttention(embed_dim, block_size, n_head, drop_out_rate) + self.mlp = nn.Sequential( + nn.Linear(embed_dim, fc_rate * embed_dim), + nn.GELU(), + nn.Linear(fc_rate * embed_dim, embed_dim), + nn.Dropout(drop_out_rate), + ) + + def forward(self, x): + x = x + self.attn(self.ln1(x)) + x = x + self.mlp(self.ln2(x)) + return x + +class CrossCondTransBase(nn.Module): + + def __init__(self, + num_vq=1024, + embed_dim=512, + clip_dim=512, + block_size=16, + num_layers=2, + n_head=8, + drop_out_rate=0.1, + fc_rate=4): + super().__init__() + self.tok_emb = nn.Embedding(num_vq + 2, embed_dim) + self.cond_emb = nn.Linear(clip_dim, embed_dim) + self.pos_embedding = nn.Embedding(block_size, embed_dim) + self.drop = nn.Dropout(drop_out_rate) + # transformer block + self.blocks = nn.Sequential(*[Block(embed_dim, block_size, n_head, drop_out_rate, fc_rate) for _ in range(num_layers)]) + self.pos_embed = pos_encoding.PositionEmbedding(block_size, embed_dim, 0.0, False) + # self.attention_pool = AttentionPool2d() + self.block_size = block_size + + self.apply(self._init_weights) + + + def get_block_size(self): + return self.block_size + + def _init_weights(self, module): + if isinstance(module, (nn.Linear, nn.Embedding)): + module.weight.data.normal_(mean=0.0, std=0.02) + if isinstance(module, nn.Linear) and module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + 
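+ # Sketch of how this base module is driven (illustrative only, with
+ # hypothetical shapes): the projected text condition is prepended as token 0,
+ # so for an index tensor of length t the output has t + 1 positions, and
+ # CrossCondTransHead below maps each position to next-token logits.
+ #   base = CrossCondTransBase(num_vq=1024, embed_dim=512, clip_dim=512, block_size=51)
+ #   clip_feat = torch.randn(2, 512)         # (bs, clip_dim)
+ #   idx = torch.randint(0, 1024, (2, 16))   # (bs, t), t < block_size
+ #   feat = base(idx, clip_feat)             # (bs, t + 1, embed_dim)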
+ def forward(self, idx, clip_feature): + if len(clip_feature.shape) == 3: + clip_feature = clip_feature.mean(axis=1, keepdim=False) + + assert len(clip_feature.shape) == 2 + if len(idx) == 0: + token_embeddings = self.cond_emb(clip_feature).unsqueeze(1) + else: + b, t = idx.size() + assert t <= self.block_size, "Cannot forward, model block size is exhausted." + # forward the Trans model + token_embeddings = self.tok_emb(idx) + token_embeddings = torch.cat([self.cond_emb(clip_feature).unsqueeze(1), token_embeddings], dim=1) + + x = self.pos_embed(token_embeddings) + x = self.blocks(x) + + return x + + +class CrossCondTransHead(nn.Module): + + def __init__(self, + num_vq=1024, + embed_dim=512, + block_size=16, + num_layers=2, + n_head=8, + drop_out_rate=0.1, + fc_rate=4): + super().__init__() + + self.blocks = nn.Sequential(*[Block(embed_dim, block_size, n_head, drop_out_rate, fc_rate) for _ in range(num_layers)]) + self.ln_f = nn.LayerNorm(embed_dim) + self.head = nn.Linear(embed_dim, num_vq + 1, bias=False) + self.block_size = block_size + + self.apply(self._init_weights) + + def get_block_size(self): + return self.block_size + + def _init_weights(self, module): + if isinstance(module, (nn.Linear, nn.Embedding)): + module.weight.data.normal_(mean=0.0, std=0.02) + if isinstance(module, nn.Linear) and module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + def forward(self, x): + x = self.blocks(x) + x = self.ln_f(x) + logits = self.head(x) + return logits + + + + + + diff --git a/Evaluator_272/mld/models/architectures/gpt/wmr_text_encoder.py b/Evaluator_272/mld/models/architectures/gpt/wmr_text_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..e0e10548bc454e5655b4ddc48b2f362e019de1a0 --- /dev/null +++ b/Evaluator_272/mld/models/architectures/gpt/wmr_text_encoder.py @@ -0,0 +1,55 @@ +import os +from typing import List, Union + +import torch +from torch import Tensor, nn +from torch.distributions.distribution import Distribution +from transformers import AutoModel, AutoTokenizer, CLIPTextModel, CLIPTokenizer + +from mld.models.operator import PositionalEncoding +from mld.utils.temos_utils import lengths_to_mask + +from mld.models.architectures.temos.motionencoder.actor import ActorAgnosticEncoder +from mld.models.architectures.temos.textencoder.distillbert_actor import DistilbertActorAgnosticEncoder +from collections import OrderedDict +import pytorch_lightning as pl + +class TextEncoder(pl.LightningModule): + + def __init__( + self, + modelpath: str, + finetune: bool = False, + last_hidden_state: bool = False, + latent_dim: list = [1, 256], + ) -> None: + + super().__init__() + + self.latent_dim = latent_dim + + model_dict = OrderedDict() + state_dict = torch.load(modelpath)["state_dict"] + + self.text_model = DistilbertActorAgnosticEncoder('distilbert-base-uncased', num_layers=4) + + for k, v in state_dict.items(): + # print(k) + if k.split(".")[0] == "textencoder": + name = k.replace("textencoder.", "") + model_dict[name] = v + + self.text_model.load_state_dict(model_dict, strict=True) + + if not finetune: + self.text_model.training = False + for p in self.text_model.parameters(): + p.requires_grad = False + + + + def forward(self, texts: List[str]): + feat_clip_text = self.text_model(texts).loc.to(self.text_model.device) + feat_clip_text = torch.cat((feat_clip_text, feat_clip_text), dim=1) + + return feat_clip_text diff --git 
a/Evaluator_272/mld/models/architectures/mld_bert.py b/Evaluator_272/mld/models/architectures/mld_bert.py new file mode 100644 index 0000000000000000000000000000000000000000..3508dddef57cf9a5af6c4abc232d631d86b959b6 --- /dev/null +++ b/Evaluator_272/mld/models/architectures/mld_bert.py @@ -0,0 +1,164 @@ +import torch +import os + +from typing import List, Union +from torch import nn, Tensor +from torch.distributions.distribution import Distribution + +from mld.models.operator import PositionalEncoding +from mld.utils.temos_utils import lengths_to_mask + + +class MLDTextEncoder(nn.Module): + def __init__(self, + cfg, + modelpath: str, + finetune: bool = False, + vae: bool = True, + latent_dim: int = 256, + ff_size: int = 1024, + num_layers: int = 6, + num_heads: int = 4, + dropout: float = 0.1, + activation: str = "gelu", + **kwargs) -> None: + + super().__init__() + + from transformers import AutoTokenizer, AutoModel + from transformers import logging + + logging.set_verbosity_error() + # Tokenizer + os.environ["TOKENIZERS_PARALLELISM"] = "false" + + self.tokenizer = AutoTokenizer.from_pretrained(modelpath) + + # Text model + self.text_model = AutoModel.from_pretrained(modelpath) + # Don't train the model + if not finetune: + self.text_model.training = False + for p in self.text_model.parameters(): + p.requires_grad = False + + # Then configure the model + self.text_encoded_dim = self.text_model.config.hidden_size + self.text_encoded_dim = latent_dim # enable projection + # self.save_hyperparameters(logger=False) + + encoded_dim = self.text_model.config.hidden_size + + # Projection of the text-outputs into the latent space + self.projection = nn.Sequential(nn.ReLU(), + nn.Linear(encoded_dim, latent_dim)) + + # TransformerVAE adapted from ACTOR + # Action agnostic: only one set of params + + vae = False # the VAE token branch below is disabled; only the global text token is used + if vae: + self.mu_token = nn.Parameter(torch.randn(latent_dim)) + self.logvar_token = nn.Parameter(torch.randn(latent_dim)) + else: + self.global_text_token = nn.Parameter(torch.randn(latent_dim)) + + self.sequence_pos_encoding = PositionalEncoding(latent_dim, dropout) + seq_trans_encoder_layer = nn.TransformerEncoderLayer( + d_model=latent_dim, + nhead=num_heads, + dim_feedforward=ff_size, + dropout=dropout, + activation=activation) + self.seqTransEncoder = nn.TransformerEncoder(seq_trans_encoder_layer, + num_layers=num_layers) + + # is_action_branch / is_cross_token are never assigned elsewhere in this + # class; default them to False (overridable via **kwargs) so that + # constructing the module does not raise an AttributeError. + self.is_action_branch = kwargs.get("is_action_branch", False) + self.is_cross_token = kwargs.get("is_cross_token", False) + + if self.is_action_branch: + action_trans_encoder_layer = nn.TransformerEncoderLayer( + d_model=latent_dim, + nhead=num_heads, + dim_feedforward=ff_size, + dropout=dropout, + activation=activation) + self.actionTransEncoder = nn.TransformerEncoder( + action_trans_encoder_layer, num_layers=num_layers) + self.mean_token = nn.Parameter(torch.randn(latent_dim)) + self.std_token = nn.Parameter(torch.randn(latent_dim)) + + def global_branch(self, x, mask): + bs = x.shape[0] + + # Switch sequence and batch_size because the input of + # Pytorch Transformer is [Sequence, Batch size, ...]
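+ # nn.TransformerEncoder without batch_first expects (seq, batch, embed), hence
+ # the permute below; illustrative shapes (hypothetical sizes):
+ #   x: (bs, seq_len, latent_dim) = (32, 20, 256) -> (20, 32, 256)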
+ x = x.permute(1, 0, 2) # now it is [nframes, bs, latent_dim] + + + global_tokens = torch.tile(self.global_text_token, + (bs, )).reshape(bs, -1) + + if self.is_cross_token: + mean_tokens = torch.tile(self.mean_token, (bs, )).reshape(bs, -1) + std_tokens = torch.tile(self.std_token, (bs, )).reshape(bs, -1) + # adding the embedding token for all sequences + xseq = torch.cat( + (mean_tokens[None], std_tokens[None], global_tokens[None], x), + 0) + + # create a bigger mask, to allow attend to emb + token_mask = torch.ones((bs, 3), dtype=bool, device=x.device) + aug_mask = torch.cat((token_mask, mask), 1) + else: + # adding the embedding token for all sequences + xseq = torch.cat((global_tokens[None], x), 0) + + # create a bigger mask, to allow attend to global + token_mask = torch.ones((bs, 1), dtype=bool, device=x.device) + aug_mask = torch.cat((token_mask, mask), 1) + + # add positional encoding + xseq = self.sequence_pos_encoding(xseq) + # content encode + text_tokens = self.seqTransEncoder(xseq, + src_key_padding_mask=~aug_mask) + return text_tokens + + def action_branch(self, x, mask): + bs = x.shape[0] + mean_tokens = torch.tile(self.mean_token, (bs, )).reshape(bs, -1) + std_tokens = torch.tile(self.std_token, (bs, )).reshape(bs, -1) + + # adding the embedding token for all sequences + actionSeq = torch.cat((mean_tokens[None], std_tokens[None], x), 0) + + # create a bigger mask, to allow attend to emb + token_mask = torch.ones((bs, 2), dtype=bool, device=x.device) + aug_mask = torch.cat((token_mask, mask), 1) + + # Pass through the transformer decoder + # with the latent vector for memory + # add positional encoding + actionSeq = self.sequence_pos_encoding(actionSeq) + action_tokens = self.actionTransEncoder(actionSeq, + src_key_padding_mask=~aug_mask) + return action_tokens[0:2] + + def forward(self, texts: List[str]): + text_encoded, mask = self.get_last_hidden_state(texts, + return_mask=True) + text_emb = self.projection(text_encoded) + + return text_emb + + def get_last_hidden_state(self, + texts: List[str], + return_mask: bool = False + ): #-> Union[Tensor, tuple[Tensor, Tensor]]: + encoded_inputs = self.tokenizer(texts, + return_tensors="pt", + padding=True) + output = self.text_model(**encoded_inputs.to(self.text_model.device)) + if not return_mask: + return output.last_hidden_state + return output.last_hidden_state, encoded_inputs.attention_mask.to( + dtype=bool) diff --git a/Evaluator_272/mld/models/architectures/mld_clip.py b/Evaluator_272/mld/models/architectures/mld_clip.py new file mode 100644 index 0000000000000000000000000000000000000000..cf0ddf803717b8bfcaee134e6a57b28b4707653d --- /dev/null +++ b/Evaluator_272/mld/models/architectures/mld_clip.py @@ -0,0 +1,90 @@ +import os +from typing import List, Union + +import torch +from torch import Tensor, nn +from torch.distributions.distribution import Distribution +from transformers import AutoModel, AutoTokenizer, CLIPTextModel, CLIPTokenizer + +from mld.models.operator import PositionalEncoding +from mld.utils.temos_utils import lengths_to_mask + + +class MldTextEncoder(nn.Module): + + def __init__( + self, + modelpath: str, + finetune: bool = False, + last_hidden_state: bool = False, + latent_dim: list = [1, 256], + ) -> None: + + super().__init__() + + self.latent_dim = latent_dim + + self.tokenizer = AutoTokenizer.from_pretrained(modelpath) + self.text_model = AutoModel.from_pretrained(modelpath) + + # Don't train the model + if not finetune: + self.text_model.training = False + for p in self.text_model.parameters(): + 
p.requires_grad = False + + # Then configure the model + self.max_length = self.tokenizer.model_max_length + if "clip" in modelpath: + self.text_encoded_dim = self.text_model.config.text_config.hidden_size + if last_hidden_state: + self.name = "clip_hidden" + else: + self.name = "clip" + elif "bert" in modelpath: + self.name = "bert" + self.text_encoded_dim = self.text_model.config.hidden_size + else: + raise ValueError(f"Model {modelpath} not supported") + + def forward(self, texts: List[str]): + # get prompt text embeddings + if self.name in ["clip", "clip_hidden"]: + text_inputs = self.tokenizer( + texts, + padding="max_length", + truncation=True, + max_length=self.max_length, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + # split into max length Clip can handle + if text_input_ids.shape[-1] > self.tokenizer.model_max_length: + text_input_ids = text_input_ids[:, :self.tokenizer. + model_max_length] + elif self.name == "bert": + text_inputs = self.tokenizer(texts, + return_tensors="pt", + padding=True) + + # use pooled output if latent dim is two-dimensional + # pooled = 0 if self.latent_dim[0] == 1 else 1 # (bs, seq_len, text_encoded_dim) -> (bs, text_encoded_dim) + # text encoder forward, clip must use get_text_features + if self.name == "clip": + # (batch_size, text_encoded_dim) + text_embeddings = self.text_model.get_text_features( + text_input_ids.to(self.text_model.device)) + # (batch_size, 1, text_encoded_dim) + text_embeddings = text_embeddings.unsqueeze(1) + elif self.name == "clip_hidden": + # (batch_size, seq_length, text_encoded_dim) + text_embeddings = self.text_model.text_model( + text_input_ids.to(self.text_model.device)).last_hidden_state + elif self.name == "bert": + # (batch_size, seq_length, text_encoded_dim) + text_embeddings = self.text_model( + **text_inputs.to(self.text_model.device)).last_hidden_state + else: + raise NotImplementedError(f"Model {self.name} not implemented") + + return text_embeddings diff --git a/Evaluator_272/mld/models/architectures/mld_denoiser.py b/Evaluator_272/mld/models/architectures/mld_denoiser.py new file mode 100644 index 0000000000000000000000000000000000000000..b3bd47f56fc0a5fbbebbe1dbe81afc0f3e1300ff --- /dev/null +++ b/Evaluator_272/mld/models/architectures/mld_denoiser.py @@ -0,0 +1,279 @@ +import torch +import torch.nn as nn +from torch import nn +from mld.models.architectures.tools.embeddings import (TimestepEmbedding, + Timesteps) +from mld.models.operator import PositionalEncoding +from mld.models.operator.cross_attention import (SkipTransformerEncoder, + TransformerDecoder, + TransformerDecoderLayer, + TransformerEncoder, + TransformerEncoderLayer) +from mld.models.operator.position_encoding import build_position_encoding +from mld.utils.temos_utils import lengths_to_mask + + +class MldDenoiser(nn.Module): + + def __init__(self, + ablation, + nfeats: int = 263, + condition: str = "text", + latent_dim: list = [1, 256], + ff_size: int = 1024, + num_layers: int = 6, + num_heads: int = 4, + dropout: float = 0.1, + normalize_before: bool = False, + activation: str = "gelu", + flip_sin_to_cos: bool = True, + return_intermediate_dec: bool = False, + position_embedding: str = "learned", + arch: str = "trans_enc", + freq_shift: int = 0, + guidance_scale: float = 7.5, + guidance_uncondp: float = 0.1, + text_encoded_dim: int = 768, + nclasses: int = 10, + **kwargs) -> None: + + super().__init__() + + self.latent_dim = latent_dim[-1] + self.text_encoded_dim = text_encoded_dim + self.condition = condition +
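+ # Note: latent_dim is a [num_latent_tokens, token_width] pair throughout this
+ # repo (e.g. [1, 256] = one 256-dim latent token); the denoiser only keeps the
+ # token width, latent_dim[-1].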
self.abl_plus = False + self.ablation_skip_connection = ablation.SKIP_CONNECT + self.diffusion_only = ablation.VAE_TYPE == "no" + self.arch = arch + self.pe_type = ablation.DIFF_PE_TYPE + + if self.diffusion_only: + # assert self.arch == "trans_enc", "only implement encoder for diffusion-only" + self.pose_embd = nn.Linear(nfeats, self.latent_dim) + self.pose_proj = nn.Linear(self.latent_dim, nfeats) + + # emb proj + if self.condition in ["text", "text_uncond", "text_all", 'text_face', 'text_body', 'text_hand', 'text_face_body', "text_seperate", "only_pose_concat", "only_pose_fusion"]: + # text condition + # project time from text_encoded_dim to latent_dim + self.time_proj = Timesteps(text_encoded_dim, flip_sin_to_cos, + freq_shift) + self.time_embedding = TimestepEmbedding(text_encoded_dim, + self.latent_dim) + # project time+text to latent_dim + if text_encoded_dim != self.latent_dim: + # todo 10.24 debug why relu + self.emb_proj = nn.Sequential( + nn.ReLU(), nn.Linear(text_encoded_dim, self.latent_dim)) + elif self.condition in ['action']: + self.time_proj = Timesteps(self.latent_dim, flip_sin_to_cos, + freq_shift) + self.time_embedding = TimestepEmbedding(self.latent_dim, + self.latent_dim) + self.emb_proj = EmbedAction(nclasses, + self.latent_dim, + guidance_scale=guidance_scale, + guidance_uncodp=guidance_uncondp) + else: + raise TypeError(f"condition type {self.condition} not supported") + + if self.pe_type == "actor": + self.query_pos = PositionalEncoding(self.latent_dim, dropout) + self.mem_pos = PositionalEncoding(self.latent_dim, dropout) + elif self.pe_type == "mld": + self.query_pos = build_position_encoding( + self.latent_dim, position_embedding=position_embedding) + self.mem_pos = build_position_encoding( + self.latent_dim, position_embedding=position_embedding) + else: + raise ValueError("Not Support PE type") + + if self.arch == "trans_enc": + if self.ablation_skip_connection: + # use DETR transformer + encoder_layer = TransformerEncoderLayer( + self.latent_dim, + num_heads, + ff_size, + dropout, + activation, + normalize_before, + ) + encoder_norm = nn.LayerNorm(self.latent_dim) + self.encoder = SkipTransformerEncoder(encoder_layer, + num_layers, encoder_norm) + else: + # use torch transformer + encoder_layer = nn.TransformerEncoderLayer( + d_model=self.latent_dim, + nhead=num_heads, + dim_feedforward=ff_size, + dropout=dropout, + activation=activation) + self.encoder = nn.TransformerEncoder(encoder_layer, + num_layers=num_layers) + elif self.arch == "trans_dec": + decoder_layer = TransformerDecoderLayer( + self.latent_dim, + num_heads, + ff_size, + dropout, + activation, + normalize_before, + ) + decoder_norm = nn.LayerNorm(self.latent_dim) + self.decoder = TransformerDecoder( + decoder_layer, + num_layers, + decoder_norm, + return_intermediate=return_intermediate_dec, + ) + else: + raise ValueError(f"Not supported architecture {self.arch}!") + + def forward(self, + sample, + timestep, + encoder_hidden_states, + lengths=None, + **kwargs): + # 0. dimension matching + # sample [latent_dim[0], batch_size, latent_dim] <= [batch_size, latent_dim[0], latent_dim[1]] + sample = sample.permute(1, 0, 2) + + # 0. check lengths for no vae (diffusion only) + if lengths not in [None, []]: + mask = lengths_to_mask(lengths, sample.device) + + # 1.
time_embedding + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timesteps = timestep.expand(sample.shape[1]).clone() + time_emb = self.time_proj(timesteps) + time_emb = time_emb.to(dtype=sample.dtype) + # [1, bs, latent_dim] <= [bs, latent_dim] + time_emb = self.time_embedding(time_emb).unsqueeze(0) + + # 2. condition + time embedding + if self.condition in ["text", "text_uncond", "text_all", 'text_face', 'text_body', 'text_hand', 'text_face_body', "text_seperate", "only_pose_concat", "only_pose_fusion"]: + # text_emb [seq_len, batch_size, text_encoded_dim] <= [batch_size, seq_len, text_encoded_dim] + encoder_hidden_states = encoder_hidden_states.permute(1, 0, 2) + text_emb = encoder_hidden_states # [num_words, bs, latent_dim] + # text embedding projection + if self.text_encoded_dim != self.latent_dim: + # [1 or 2, bs, latent_dim] <= [1 or 2, bs, text_encoded_dim] + text_emb_latent = self.emb_proj(text_emb) + else: + text_emb_latent = text_emb + if self.abl_plus: + emb_latent = time_emb + text_emb_latent + else: + emb_latent = torch.cat((time_emb, text_emb_latent), 0) + elif self.condition in ['action']: + action_emb = self.emb_proj(encoder_hidden_states) + if self.abl_plus: + emb_latent = action_emb + time_emb + else: + emb_latent = torch.cat((time_emb, action_emb), 0) + else: + raise TypeError(f"condition type {self.condition} not supported") + + # 4. transformer + if self.arch == "trans_enc": + if self.diffusion_only: + sample = self.pose_embd(sample) + xseq = torch.cat((emb_latent, sample), axis=0) + else: + xseq = torch.cat((sample, emb_latent), axis=0) + + # if self.ablation_skip_connection: + # xseq = self.query_pos(xseq) + # tokens = self.encoder(xseq) + # else: + # # adding the timestep embed + # # [seqlen+1, bs, d] + # # todo change to query_pos_decoder + xseq = self.query_pos(xseq) + tokens = self.encoder(xseq) + + if self.diffusion_only: + sample = tokens[emb_latent.shape[0]:] + sample = self.pose_proj(sample) + + # zero for padded area + sample[~mask.T] = 0 + else: + sample = tokens[:sample.shape[0]] + + elif self.arch == "trans_dec": + if self.diffusion_only: + sample = self.pose_embd(sample) + + # tgt - [1 or 5 or 10, bs, latent_dim] + # memory - [token_num, bs, latent_dim] + sample = self.query_pos(sample) + emb_latent = self.mem_pos(emb_latent) + sample = self.decoder(tgt=sample, memory=emb_latent).squeeze(0) + + if self.diffusion_only: + sample = self.pose_proj(sample) + # zero for padded area + sample[~mask.T] = 0 + else: + raise TypeError(f"{self.arch} is not supported") + + # 5.
[batch_size, latent_dim[0], latent_dim[1]] <= [latent_dim[0], batch_size, latent_dim[1]] + sample = sample.permute(1, 0, 2) + + return (sample, ) + + +class EmbedAction(nn.Module): + + def __init__(self, + num_actions, + latent_dim, + guidance_scale=7.5, + guidance_uncodp=0.1, + force_mask=False): + super().__init__() + self.nclasses = num_actions + self.guidance_scale = guidance_scale + self.action_embedding = nn.Parameter( + torch.randn(num_actions, latent_dim)) + + self.guidance_uncodp = guidance_uncodp + self.force_mask = force_mask + self._reset_parameters() + + def forward(self, input): + idx = input[:, 0].to(torch.long) # an index array must be long + output = self.action_embedding[idx] + if not self.training and self.guidance_scale > 1.0: + uncond, output = output.chunk(2) + uncond_out = self.mask_cond(uncond, force=True) + out = self.mask_cond(output) + output = torch.cat((uncond_out, out)) + + # mask_cond is applied once more to the recombined batch + output = self.mask_cond(output) + + return output.unsqueeze(0) + + def mask_cond(self, output, force=False): + bs, d = output.shape + # classifier guidance: zero the condition to get the unconditional branch + if self.force_mask or force: + return torch.zeros_like(output) + elif self.training and self.guidance_uncodp > 0.: + mask = torch.bernoulli( + torch.ones(bs, device=output.device) * + self.guidance_uncodp).view( + bs, 1) # 1-> use null_cond, 0-> use real cond + return output * (1. - mask) + else: + return output + + def _reset_parameters(self): + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) diff --git a/Evaluator_272/mld/models/architectures/mld_dual_vae.py b/Evaluator_272/mld/models/architectures/mld_dual_vae.py new file mode 100644 index 0000000000000000000000000000000000000000..b63844a3fb9b6fdbb9019a7b139127b912691221 --- /dev/null +++ b/Evaluator_272/mld/models/architectures/mld_dual_vae.py @@ -0,0 +1,346 @@ +from functools import reduce +from typing import List, Optional, Union + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor, nn +from torch.distributions.distribution import Distribution + +from mld.models.architectures.tools.embeddings import TimestepEmbedding, Timesteps +from mld.models.operator import PositionalEncoding +from mld.models.operator.cross_attention import ( + SkipTransformerEncoder, + SkipTransformerDecoder, + TransformerDecoder, + TransformerDecoderLayer, + TransformerEncoder, + TransformerEncoderLayer, +) +from mld.models.operator.position_encoding import build_position_encoding +from mld.utils.temos_utils import lengths_to_mask +""" +vae + +skip connection encoder +skip connection decoder + +mem for each decoder layer +""" + + +class MldDualVae(nn.Module): + + def __init__(self, + ablation, + nfeats: int, + latent_dim: list = [1, 256], + ff_size: int = 1024, + num_layers: int = 9, + num_heads: int = 4, + dropout: float = 0.1, + arch: str = "all_encoder", + normalize_before: bool = False, + activation: str = "gelu", + position_embedding: str = "learned", + **kwargs) -> None: + + super().__init__() + + assert nfeats == 313 + + + self.latent_size = latent_dim[0] + self.latent_dim = latent_dim[-1] + input_feats = nfeats + + body_input_feats = 4 + 21 * 3 + 22 * 3 + hand_input_feats = 30 * 3 + 30 * 3 + + output_feats = nfeats + + body_output_feats = 4 + 21 * 3 + 22 * 3 + hand_output_feats = 30 * 3 + 30 * 3 + + self.arch = arch + self.mlp_dist = ablation.MLP_DIST + self.pe_type = ablation.PE_TYPE + + if self.pe_type == "actor": + self.query_pos_encoder = PositionalEncoding( + self.latent_dim, dropout) +
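+ # Encoder inputs and decoder time-queries each get their own positional
+ # encoding module: the "actor" branch reuses the PositionalEncoding operator,
+ # while the "mld" branch builds one via build_position_encoding
+ # (position_embedding="learned" by default, see the elif below).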
self.query_pos_decoder = PositionalEncoding( + self.latent_dim, dropout) + elif self.pe_type == "mld": + # self.query_pos_encoder = build_position_encoding( + # self.latent_dim, position_embedding=position_embedding) + self.body_query_pos_encoder = build_position_encoding( + self.latent_dim, position_embedding=position_embedding) + self.hand_query_pos_encoder = build_position_encoding( + self.latent_dim, position_embedding=position_embedding) + + # self.query_pos_decoder = build_position_encoding( + # self.latent_dim, position_embedding=position_embedding) + self.body_query_pos_decoder = build_position_encoding( + self.latent_dim, position_embedding=position_embedding) + self.hand_query_pos_decoder = build_position_encoding( + self.latent_dim, position_embedding=position_embedding) + + else: + raise ValueError("Not Support PE type") + + # encoder_layer = TransformerEncoderLayer( + # self.latent_dim, + # num_heads, + # ff_size, + # dropout, + # activation, + # normalize_before, + # ) + + body_encoder_layer = TransformerEncoderLayer( + self.latent_dim, + num_heads, + ff_size, + dropout, + activation, + normalize_before, + ) + + hand_encoder_layer = TransformerEncoderLayer( + self.latent_dim, + num_heads, + ff_size, + dropout, + activation, + normalize_before, + ) + + body_encoder_norm = nn.LayerNorm(self.latent_dim) + hand_encoder_norm = nn.LayerNorm(self.latent_dim) + + # self.encoder = SkipTransformerEncoder(encoder_layer, num_layers, + # encoder_norm) + + self.body_encoder = SkipTransformerEncoder(body_encoder_layer, num_layers, + body_encoder_norm) + self.hand_encoder = SkipTransformerEncoder(hand_encoder_layer, num_layers, + hand_encoder_norm) + + + if self.arch == "all_encoder": + decoder_norm = nn.LayerNorm(self.latent_dim) + self.decoder = SkipTransformerEncoder(encoder_layer, num_layers, + decoder_norm) + elif self.arch == "encoder_decoder": + + body_decoder_layer = TransformerDecoderLayer( + self.latent_dim, + num_heads, + ff_size, + dropout, + activation, + normalize_before, + ) + hand_decoder_layer = TransformerDecoderLayer( + self.latent_dim, + num_heads, + ff_size, + dropout, + activation, + normalize_before, + ) + body_decoder_norm = nn.LayerNorm(self.latent_dim) + hand_decoder_norm = nn.LayerNorm(self.latent_dim) + + self.body_decoder = SkipTransformerDecoder(body_decoder_layer, num_layers, + body_decoder_norm) + + self.hand_decoder = SkipTransformerDecoder(hand_decoder_layer, num_layers, + hand_decoder_norm) + + + else: + raise ValueError("Not support architecture!") + + if self.mlp_dist: + self.global_motion_token = nn.Parameter( + torch.randn(self.latent_size, self.latent_dim)) + self.dist_layer = nn.Linear(self.latent_dim, 2 * self.latent_dim) + else: + + + self.body_global_motion_token = nn.Parameter( + torch.randn(self.latent_size * 2, self.latent_dim)) + + self.hand_global_motion_token = nn.Parameter( + torch.randn(self.latent_size * 2, self.latent_dim)) + + # self.skel_embedding = nn.Linear(input_feats, self.latent_dim) + self.body_skel_embedding = nn.Linear(body_output_feats, self.latent_dim) + self.hand_skel_embedding = nn.Linear(hand_output_feats, self.latent_dim) + + # self.final_layer = nn.Linear(self.latent_dim, output_feats) + self.body_final_layer = nn.Linear(self.latent_dim, body_output_feats) + self.hand_final_layer = nn.Linear(self.latent_dim, hand_output_feats) + + + def forward(self, features: Tensor, lengths: Optional[List[int]] = None): + + print("Should Not enter here") + z, dist = self.encode(features, lengths) + feats_rst = self.decode(z, lengths) + 
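+ # (This forward path is effectively dead code: encode() below returns
+ # (body_latent, hand_latent, body_dist, hand_dist) and decode() takes both
+ # latents, so callers are expected to invoke encode()/decode() directly.)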
return feats_rst, z, dist + + def encode( + self, + features: Tensor, + lengths: Optional[List[int]] = None + ) -> Union[Tensor, Distribution]: + if lengths is None: + lengths = [len(feature) for feature in features] + + device = features.device + + body_features = torch.cat((features[..., :4+21*3], features[..., 4+51*3:4+51*3+22*3]), dim=-1) # (32, 196, 133) + hand_features = torch.cat((features[..., 4+21*3:4+51*3], features[..., 4+51*3+22*3:]), dim=-1) # (132, 196, 180) + bs, nframes, _ = features.shape # (32, 196, 313) + mask = lengths_to_mask(lengths, device) # (32, 196) + + body_x = body_features + hand_x = hand_features + # Embed each human poses into latent vectors + # x = self.skel_embedding(x) + body_x = self.body_skel_embedding(body_x) + hand_x = self.hand_skel_embedding(hand_x) + + # Switch sequence and batch_size because the input of + # Pytorch Transformer is [Sequence, Batch size, ...] + # x = x.permute(1, 0, 2) # now it is [nframes, bs, latent_dim] (196, 32, 256) + body_x = body_x.permute(1,0,2) + hand_x = hand_x.permute(1,0,2) + + # Each batch has its own set of tokens + # dist = torch.tile(self.global_motion_token[:, None, :], (1, bs, 1)) # (2, 32, 256) + body_dist = torch.tile(self.body_global_motion_token[:, None, :], (1, bs, 1)) # (2, 32, 256) + hand_dist = torch.tile(self.hand_global_motion_token[:, None, :], (1, bs, 1)) # (2, 32, 256) + + # create a bigger mask, to allow attend to emb + dist_masks = torch.ones((bs, body_dist.shape[0]), + dtype=bool, + device=body_x.device) # (32, 2) all one + + aug_mask = torch.cat((dist_masks, mask), 1) + + # adding the embedding token for all sequences + # xseq = torch.cat((dist, x), 0) + xseq_body = torch.cat((body_dist, body_x), 0) + xseq_hand = torch.cat((hand_dist, hand_x), 0) + + if self.pe_type == "actor": + xseq = self.query_pos_encoder(xseq) + dist = self.encoder(xseq, + src_key_padding_mask=~aug_mask)[:dist.shape[0]] + elif self.pe_type == "mld": + # xseq = self.query_pos_encoder(xseq) + # dist = self.encoder(xseq, + # src_key_padding_mask=~aug_mask)[:dist.shape[0]] + + xseq_body = self.body_query_pos_encoder(xseq_body) + body_dist = self.body_encoder(xseq_body, + src_key_padding_mask=~aug_mask)[:body_dist.shape[0]] + + xseq_hand = self.hand_query_pos_encoder(xseq_hand) + hand_dist = self.hand_encoder(xseq_hand, + src_key_padding_mask=~aug_mask)[:hand_dist.shape[0]] + + # content distribution + # self.latent_dim => 2*self.latent_dim + if self.mlp_dist: + tokens_dist = self.dist_layer(dist) + mu = tokens_dist[:, :, :self.latent_dim] + logvar = tokens_dist[:, :, self.latent_dim:] + else: + + body_mu = body_dist[0:self.latent_size, ...] + body_logvar = body_dist[self.latent_size:, ...] + hand_mu = hand_dist[0:self.latent_size, ...] + hand_logvar = hand_dist[self.latent_size:, ...] 
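+ # Reparameterization trick, applied to the body and hand streams separately
+ # below: z = mu + std * eps with eps ~ N(0, I); Normal(mu, std).rsample()
+ # keeps the sample differentiable w.r.t. mu and std. Minimal standalone
+ # sketch (hypothetical shapes):
+ #   mu = torch.zeros(2, 32, 256); logvar = torch.zeros(2, 32, 256)
+ #   dist = torch.distributions.Normal(mu, logvar.exp().pow(0.5))
+ #   z = dist.rsample()   # same shape as mu, gradients flow through mu/std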
+ + + body_std = body_logvar.exp().pow(0.5) + body_dist = torch.distributions.Normal(body_mu, body_std) + body_latent = body_dist.rsample() + + hand_std = hand_logvar.exp().pow(0.5) + hand_dist = torch.distributions.Normal(hand_mu, hand_std) + hand_latent = hand_dist.rsample() + + # return latent, dist + return body_latent, hand_latent, body_dist, hand_dist + + def decode(self, body_z: Tensor, hand_z: Tensor, lengths: List[int]): + mask = lengths_to_mask(lengths, body_z.device) + bs, nframes = mask.shape + + # queries = torch.zeros(nframes, bs, self.latent_dim, device=z.device) + body_queries = torch.zeros(nframes, bs, self.latent_dim, device=body_z.device) + hand_queries = torch.zeros(nframes, bs, self.latent_dim, device=hand_z.device) + + + # Pass through the transformer decoder + # with the latent vector for memory + if self.arch == "all_encoder": + xseq = torch.cat((z, queries), axis=0) + z_mask = torch.ones((bs, self.latent_size), + dtype=bool, + device=z.device) + augmask = torch.cat((z_mask, mask), axis=1) + + if self.pe_type == "actor": + xseq = self.query_pos_decoder(xseq) + output = self.decoder( + xseq, src_key_padding_mask=~augmask)[z.shape[0]:] + elif self.pe_type == "mld": + xseq = self.query_pos_decoder(xseq) + output = self.decoder( + xseq, src_key_padding_mask=~augmask)[z.shape[0]:] + + + elif self.arch == "encoder_decoder": + if self.pe_type == "actor": + queries = self.query_pos_decoder(queries) + output = self.decoder(tgt=queries, + memory=z, + tgt_key_padding_mask=~mask).squeeze(0) + elif self.pe_type == "mld": + # queries = self.query_pos_decoder(queries) + body_queries = self.body_query_pos_decoder(body_queries) + hand_queries = self.hand_query_pos_decoder(hand_queries) + + + body_output = self.body_decoder( + tgt=body_queries, + memory=body_z, + tgt_key_padding_mask=~mask, + + ).squeeze(0) + + hand_output = self.hand_decoder( + tgt=hand_queries, + memory=hand_z, + tgt_key_padding_mask=~mask, + + ).squeeze(0) + + + body_output = self.body_final_layer(body_output) + hand_output = self.hand_final_layer(hand_output) + # zero for padded area + # output[~mask.T] = 0 + body_output[~mask.T] = 0 + hand_output[~mask.T] = 0 + # Pytorch Transformer: [Sequence, Batch size, ...] 
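+ # Note the channel layout: the concatenation below returns
+ # [body_feats (133) | hand_feats (180)] block-wise, which differs from the
+ # interleaved channel order that encode() sliced out of the 313-dim input.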
+ feats = torch.cat((body_output.permute(1, 0, 2), hand_output.permute(1, 0, 2)), dim=-1) + return feats diff --git a/Evaluator_272/mld/models/architectures/mld_vae.py b/Evaluator_272/mld/models/architectures/mld_vae.py new file mode 100644 index 0000000000000000000000000000000000000000..91e0496b557e50832a665c1bb2bdb22008221a21 --- /dev/null +++ b/Evaluator_272/mld/models/architectures/mld_vae.py @@ -0,0 +1,226 @@ +from functools import reduce +from typing import List, Optional, Union + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor, nn +from torch.distributions.distribution import Distribution + +from mld.models.architectures.tools.embeddings import TimestepEmbedding, Timesteps +from mld.models.operator import PositionalEncoding +from mld.models.operator.cross_attention import ( + SkipTransformerEncoder, + SkipTransformerDecoder, + TransformerDecoder, + TransformerDecoderLayer, + TransformerEncoder, + TransformerEncoderLayer, +) +from mld.models.operator.position_encoding import build_position_encoding +from mld.utils.temos_utils import lengths_to_mask +""" +vae + +skip connection encoder +skip connection decoder + +mem for each decoder layer +""" + + +class MldVae(nn.Module): + + def __init__(self, + ablation, + nfeats: int, + latent_dim: list = [1, 256], + ff_size: int = 1024, + num_layers: int = 9, + num_heads: int = 4, + dropout: float = 0.1, + arch: str = "all_encoder", + normalize_before: bool = False, + activation: str = "gelu", + position_embedding: str = "learned", + **kwargs) -> None: + + super().__init__() + + self.latent_size = latent_dim[0] + self.latent_dim = latent_dim[-1] + input_feats = nfeats + output_feats = nfeats + self.arch = arch + self.mlp_dist = ablation.MLP_DIST + self.pe_type = ablation.PE_TYPE + + if self.pe_type == "actor": + self.query_pos_encoder = PositionalEncoding( + self.latent_dim, dropout) + self.query_pos_decoder = PositionalEncoding( + self.latent_dim, dropout) + elif self.pe_type == "mld": + self.query_pos_encoder = build_position_encoding( + self.latent_dim, position_embedding=position_embedding) + self.query_pos_decoder = build_position_encoding( + self.latent_dim, position_embedding=position_embedding) + else: + raise ValueError("Not Support PE type") + + encoder_layer = TransformerEncoderLayer( + self.latent_dim, + num_heads, + ff_size, + dropout, + activation, + normalize_before, + ) + encoder_norm = nn.LayerNorm(self.latent_dim) + self.encoder = SkipTransformerEncoder(encoder_layer, num_layers, + encoder_norm) + + if self.arch == "all_encoder": + decoder_norm = nn.LayerNorm(self.latent_dim) + self.decoder = SkipTransformerEncoder(encoder_layer, num_layers, + decoder_norm) + elif self.arch == "encoder_decoder": + decoder_layer = TransformerDecoderLayer( + self.latent_dim, + num_heads, + ff_size, + dropout, + activation, + normalize_before, + ) + decoder_norm = nn.LayerNorm(self.latent_dim) + self.decoder = SkipTransformerDecoder(decoder_layer, num_layers, + decoder_norm) + else: + raise ValueError("Not support architecture!") + + if self.mlp_dist: + self.global_motion_token = nn.Parameter( + torch.randn(self.latent_size, self.latent_dim)) + self.dist_layer = nn.Linear(self.latent_dim, 2 * self.latent_dim) + else: + self.global_motion_token = nn.Parameter( + torch.randn(self.latent_size * 2, self.latent_dim)) + + self.skel_embedding = nn.Linear(input_feats, self.latent_dim) + self.final_layer = nn.Linear(self.latent_dim, output_feats) + + def forward(self, features: Tensor, 
lengths: Optional[List[int]] = None): + + print("Should Not enter here") + + z, dist = self.encode(features, lengths) + feats_rst = self.decode(z, lengths) + return feats_rst, z, dist + + def encode( + self, + features: Tensor, + lengths: Optional[List[int]] = None + ) -> Union[Tensor, Distribution]: + if lengths is None: + lengths = [len(feature) for feature in features] + + device = features.device + + bs, nframes, nfeats = features.shape + mask = lengths_to_mask(lengths, device) + + x = features + # Embed each human poses into latent vectors + x = self.skel_embedding(x) + + # Switch sequence and batch_size because the input of + # Pytorch Transformer is [Sequence, Batch size, ...] + x = x.permute(1, 0, 2) # now it is [nframes, bs, latent_dim] + + # Each batch has its own set of tokens + dist = torch.tile(self.global_motion_token[:, None, :], (1, bs, 1)) + + # create a bigger mask, to allow attend to emb + dist_masks = torch.ones((bs, dist.shape[0]), + dtype=bool, + device=x.device) + aug_mask = torch.cat((dist_masks, mask), 1) + + # adding the embedding token for all sequences + xseq = torch.cat((dist, x), 0) + if self.pe_type == "actor": + xseq = self.query_pos_encoder(xseq) + dist = self.encoder(xseq, + src_key_padding_mask=~aug_mask)[:dist.shape[0]] + elif self.pe_type == "mld": + xseq = self.query_pos_encoder(xseq) + dist = self.encoder(xseq, + src_key_padding_mask=~aug_mask)[:dist.shape[0]] + + + # content distribution + # self.latent_dim => 2*self.latent_dim + if self.mlp_dist: + tokens_dist = self.dist_layer(dist) + mu = tokens_dist[:, :, :self.latent_dim] + logvar = tokens_dist[:, :, self.latent_dim:] + else: + mu = dist[0:self.latent_size, ...] + logvar = dist[self.latent_size:, ...] + + # resampling + std = logvar.exp().pow(0.5) + dist = torch.distributions.Normal(mu, std) + latent = dist.rsample() + return latent, dist + + def decode(self, z: Tensor, lengths: List[int]): + mask = lengths_to_mask(lengths, z.device) + bs, nframes = mask.shape + + queries = torch.zeros(nframes, bs, self.latent_dim, device=z.device) + + + if self.arch == "all_encoder": + xseq = torch.cat((z, queries), axis=0) + z_mask = torch.ones((bs, self.latent_size), + dtype=bool, + device=z.device) + augmask = torch.cat((z_mask, mask), axis=1) + + if self.pe_type == "actor": + xseq = self.query_pos_decoder(xseq) + output = self.decoder( + xseq, src_key_padding_mask=~augmask)[z.shape[0]:] + elif self.pe_type == "mld": + xseq = self.query_pos_decoder(xseq) + output = self.decoder( + xseq, src_key_padding_mask=~augmask)[z.shape[0]:] + + elif self.arch == "encoder_decoder": + if self.pe_type == "actor": + queries = self.query_pos_decoder(queries) + output = self.decoder(tgt=queries, + memory=z, + tgt_key_padding_mask=~mask).squeeze(0) + elif self.pe_type == "mld": + queries = self.query_pos_decoder(queries) + # mem_pos = self.mem_pos_decoder(z) + output = self.decoder( + tgt=queries, + memory=z, + tgt_key_padding_mask=~mask, + # query_pos=query_pos, + # pos=mem_pos, + ).squeeze(0) + + + output = self.final_layer(output) + # zero for padded area + output[~mask.T] = 0 + # Pytorch Transformer: [Sequence, Batch size, ...] 
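+ # Minimal round-trip sketch (hypothetical config: an ablation object with
+ # MLP_DIST=False and PE_TYPE="mld"; 272-dim motion features as in this repo):
+ #   vae = MldVae(ablation=cfg.TRAIN.ABLATION, nfeats=272, arch="encoder_decoder")
+ #   z, dist = vae.encode(motion, lengths)   # z: (1, bs, 256)
+ #   rec = vae.decode(z, lengths)            # rec: (bs, max(lengths), 272)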
+ feats = output.permute(1, 0, 2) + return feats diff --git a/Evaluator_272/mld/models/architectures/t2m_motionenc.py b/Evaluator_272/mld/models/architectures/t2m_motionenc.py new file mode 100644 index 0000000000000000000000000000000000000000..cb3c3a304e0f0c457b25bba752de0eccd8798e2c --- /dev/null +++ b/Evaluator_272/mld/models/architectures/t2m_motionenc.py @@ -0,0 +1,58 @@ +import torch +import torch.nn as nn +from torch.nn.utils.rnn import pack_padded_sequence + + +class MovementConvEncoder(nn.Module): + def __init__(self, input_size, hidden_size, output_size): + super(MovementConvEncoder, self).__init__() + self.main = nn.Sequential( + nn.Conv1d(input_size, hidden_size, 4, 2, 1), + nn.Dropout(0.2, inplace=True), + nn.LeakyReLU(0.2, inplace=True), + nn.Conv1d(hidden_size, output_size, 4, 2, 1), + nn.Dropout(0.2, inplace=True), + nn.LeakyReLU(0.2, inplace=True), + ) + self.out_net = nn.Linear(output_size, output_size) + + def forward(self, inputs): + inputs = inputs.permute(0, 2, 1) + outputs = self.main(inputs).permute(0, 2, 1) + return self.out_net(outputs) + + +class MotionEncoderBiGRUCo(nn.Module): + def __init__(self, input_size, hidden_size, output_size): + super(MotionEncoderBiGRUCo, self).__init__() + + self.input_emb = nn.Linear(input_size, hidden_size) + self.gru = nn.GRU( + hidden_size, hidden_size, batch_first=True, bidirectional=True + ) + self.output_net = nn.Sequential( + nn.Linear(hidden_size * 2, hidden_size), + nn.LayerNorm(hidden_size), + nn.LeakyReLU(0.2, inplace=True), + nn.Linear(hidden_size, output_size), + ) + + self.hidden_size = hidden_size + self.hidden = nn.Parameter( + torch.randn((2, 1, self.hidden_size), requires_grad=True) + ) + + def forward(self, inputs, m_lens): + num_samples = inputs.shape[0] + + input_embs = self.input_emb(inputs) + hidden = self.hidden.repeat(1, num_samples, 1) + + cap_lens = m_lens.data.tolist() + emb = pack_padded_sequence(input_embs, cap_lens, batch_first=True) + + gru_seq, gru_last = self.gru(emb, hidden) + + gru_last = torch.cat([gru_last[0], gru_last[1]], dim=-1) + + return self.output_net(gru_last) diff --git a/Evaluator_272/mld/models/architectures/t2m_textenc.py b/Evaluator_272/mld/models/architectures/t2m_textenc.py new file mode 100644 index 0000000000000000000000000000000000000000..afcc54c898b24fd1fe641ac47ace3197fe4b0167 --- /dev/null +++ b/Evaluator_272/mld/models/architectures/t2m_textenc.py @@ -0,0 +1,78 @@ +import torch +import torch.nn as nn +from torch.nn.utils.rnn import pack_padded_sequence + + +class TextEncoderBiGRUCo(nn.Module): + def __init__(self, word_size, pos_size, hidden_size, output_size): + super(TextEncoderBiGRUCo, self).__init__() + + self.pos_emb = nn.Linear(pos_size, word_size) + self.input_emb = nn.Linear(word_size, hidden_size) + self.gru = nn.GRU( + hidden_size, hidden_size, batch_first=True, bidirectional=True + ) + self.output_net = nn.Sequential( + nn.Linear(hidden_size * 2, hidden_size), + nn.LayerNorm(hidden_size), + nn.LeakyReLU(0.2, inplace=True), + nn.Linear(hidden_size, output_size), + ) + + self.hidden_size = hidden_size + self.hidden = nn.Parameter( + torch.randn((2, 1, self.hidden_size), requires_grad=True) + ) + + def forward(self, word_embs, pos_onehot, cap_lens): + num_samples = word_embs.shape[0] + + pos_embs = self.pos_emb(pos_onehot) + inputs = word_embs + pos_embs + input_embs = self.input_emb(inputs) + hidden = self.hidden.repeat(1, num_samples, 1) + + cap_lens = cap_lens.data.tolist() + emb = pack_padded_sequence(input_embs, cap_lens, batch_first=True) + + gru_seq, gru_last 
= self.gru(emb, hidden) + + gru_last = torch.cat([gru_last[0], gru_last[1]], dim=-1) + + return self.output_net(gru_last) + + + +class TextEncoderBiGRUCoV2(nn.Module): + def __init__(self, word_size, hidden_size, output_size): + super(TextEncoderBiGRUCoV2, self).__init__() + + self.input_emb = nn.Linear(word_size, hidden_size) + self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True, bidirectional=True) + self.output_net = nn.Sequential( + nn.Linear(hidden_size * 2, hidden_size), + nn.LayerNorm(hidden_size), + nn.LeakyReLU(0.2, inplace=True), + nn.Linear(hidden_size, output_size) + ) + + + self.hidden_size = hidden_size + self.hidden = nn.Parameter(torch.randn((2, 1, self.hidden_size), requires_grad=True)) + + + def forward(self, word_embs, cap_lens): + num_samples = word_embs.shape[0] + + inputs = word_embs + input_embs = self.input_emb(inputs) + hidden = self.hidden.repeat(1, num_samples, 1) + + cap_lens = cap_lens.data.tolist() + emb = pack_padded_sequence(input_embs, cap_lens, batch_first=True) + + gru_seq, gru_last = self.gru(emb, hidden) + + gru_last = torch.cat([gru_last[0], gru_last[1]], dim=-1) + + return self.output_net(gru_last) diff --git a/Evaluator_272/mld/models/architectures/temos/__init__.py b/Evaluator_272/mld/models/architectures/temos/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Evaluator_272/mld/models/architectures/temos/motiondecoder/__init__.py b/Evaluator_272/mld/models/architectures/temos/motiondecoder/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Evaluator_272/mld/models/architectures/temos/motiondecoder/actor.py b/Evaluator_272/mld/models/architectures/temos/motiondecoder/actor.py new file mode 100644 index 0000000000000000000000000000000000000000..66e98e542660488bbb96d19d84b9920c356a8433 --- /dev/null +++ b/Evaluator_272/mld/models/architectures/temos/motiondecoder/actor.py @@ -0,0 +1,60 @@ +import torch +import torch.nn as nn +import numpy as np +import pytorch_lightning as pl + +from typing import List, Optional +from torch import nn, Tensor + +from mld.models.operator import PositionalEncoding +from mld.utils.temos_utils import lengths_to_mask + + +class ActorAgnosticDecoder(pl.LightningModule): + def __init__(self, nfeats: int, + latent_dim: int = 256, ff_size: int = 1024, + num_layers: int = 4, num_heads: int = 4, + dropout: float = 0.1, + activation: str = "gelu", **kwargs) -> None: + + super().__init__() + self.save_hyperparameters(logger=False) + + output_feats = nfeats + + self.sequence_pos_encoding = PositionalEncoding(latent_dim, dropout) + + seq_trans_decoder_layer = nn.TransformerDecoderLayer(d_model=latent_dim, + nhead=num_heads, + dim_feedforward=ff_size, + dropout=dropout, + activation=activation) + + self.seqTransDecoder = nn.TransformerDecoder(seq_trans_decoder_layer, + num_layers=num_layers) + + self.final_layer = nn.Linear(latent_dim, output_feats) + + def forward(self, z: Tensor, lengths: List[int]): + mask = lengths_to_mask(lengths, z.device) + latent_dim = z.shape[1] + bs, nframes = mask.shape + nfeats = self.hparams.nfeats + + z = z[None] # sequence of 1 element for the memory + + # Construct time queries + time_queries = torch.zeros(nframes, bs, latent_dim, device=z.device) + time_queries = self.sequence_pos_encoding(time_queries) + + # Pass through the transformer decoder + # with the latent vector for memory + output = 
self.seqTransDecoder(tgt=time_queries, memory=z, + tgt_key_padding_mask=~mask) + + output = self.final_layer(output) + # zero for padded area + output[~mask.T] = 0 + # Pytorch Transformer: [Sequence, Batch size, ...] + feats = output.permute(1, 0, 2) + return feats diff --git a/Evaluator_272/mld/models/architectures/temos/motionencoder/__init__.py b/Evaluator_272/mld/models/architectures/temos/motionencoder/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Evaluator_272/mld/models/architectures/temos/motionencoder/actor.py b/Evaluator_272/mld/models/architectures/temos/motionencoder/actor.py new file mode 100644 index 0000000000000000000000000000000000000000..f5c516460571cf569419cb76bcf24de800329667 --- /dev/null +++ b/Evaluator_272/mld/models/architectures/temos/motionencoder/actor.py @@ -0,0 +1,101 @@ +import torch +import torch.nn as nn +import numpy as np +import pytorch_lightning as pl + +from typing import List, Optional, Union +from torch import nn, Tensor +from torch.distributions.distribution import Distribution + +from mld.models.operator import PositionalEncoding + +class ActorAgnosticEncoder(pl.LightningModule): + def __init__(self, nfeats: int, vae: bool, + latent_dim: int = 256, ff_size: int = 1024, + num_layers: int = 4, num_heads: int = 4, + dropout: float = 0.1, + activation: str = "gelu", max_len: int = -1, **kwargs) -> None: + super().__init__() + self.save_hyperparameters(logger=False) + input_feats = nfeats + self.skel_embedding = nn.Linear(input_feats, latent_dim) + self.max_len = max_len + + # Action agnostic: only one set of params + if vae: + self.mu_token = nn.Parameter(torch.randn(latent_dim)) + self.logvar_token = nn.Parameter(torch.randn(latent_dim)) + else: + self.emb_token = nn.Parameter(torch.randn(latent_dim)) + + self.sequence_pos_encoding = PositionalEncoding(latent_dim, dropout) + + seq_trans_encoder_layer = nn.TransformerEncoderLayer(d_model=latent_dim, + nhead=num_heads, + dim_feedforward=ff_size, + dropout=dropout, + activation=activation) + + self.seqTransEncoder = nn.TransformerEncoder(seq_trans_encoder_layer, + num_layers=num_layers) + + def lengths_to_mask(self, lengths, device): + if self.max_len == -1: + max_len = max(lengths) + mask = torch.arange(max_len, device=device).expand(len(lengths), max_len) < lengths.unsqueeze(1) + else: + mask = torch.arange(self.max_len, device=lengths.device).expand(len(lengths), self.max_len) < lengths.unsqueeze(1) + return mask + + def forward(self, features: Tensor, lengths: Optional[List[int]] = None) -> Union[Tensor, Distribution]: + if lengths is None: + lengths = [len(feature) for feature in features] + + device = features.device + + bs, nframes, nfeats = features.shape + + if not isinstance(lengths, torch.Tensor): + lengths = torch.tensor(lengths, device=device) + mask = self.lengths_to_mask(lengths, device).to(device) + + x = features + # Embed each human poses into latent vectors + x = self.skel_embedding(x) + + # Switch sequence and batch_size because the input of + # Pytorch Transformer is [Sequence, Batch size, ...] 
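+        # Editorial note -- illustrative shapes, assuming bs=2, nframes=60, nfeats=272,
+        # latent_dim=256: features [2, 60, 272] -> skel_embedding -> [2, 60, 256]; the
+        # permute below gives [60, 2, 256]; prepending the mu/logvar tokens (vae=True)
+        # yields xseq [62, 2, 256], and the returned Normal has batch_shape [2, 256].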
+ x = x.permute(1, 0, 2) # now it is [nframes, bs, latent_dim] + + # Each batch has its own set of tokens + if self.hparams.vae: + mu_token = torch.tile(self.mu_token, (bs,)).reshape(bs, -1) + logvar_token = torch.tile(self.logvar_token, (bs,)).reshape(bs, -1) + + # adding the distribution tokens for all sequences + xseq = torch.cat((mu_token[None], logvar_token[None], x), 0) + + # create a bigger mask, to allow attend to mu and logvar + token_mask = torch.ones((bs, 2), dtype=bool, device=x.device) + aug_mask = torch.cat((token_mask, mask), 1) + else: + emb_token = torch.tile(self.emb_token, (bs,)).reshape(bs, -1) + + # adding the embedding token for all sequences + xseq = torch.cat((emb_token[None], x), 0) + + # create a bigger mask, to allow attend to emb + token_mask = torch.ones((bs, 1), dtype=bool, device=x.device) + aug_mask = torch.cat((token_mask, mask), 1) + + # add positional encoding + xseq = self.sequence_pos_encoding(xseq) + final = self.seqTransEncoder(xseq, src_key_padding_mask=~aug_mask) + + if self.hparams.vae: + mu, logvar = final[0], final[1] + std = logvar.exp().pow(0.5) + dist = torch.distributions.Normal(mu, std) + return dist + else: + return final[0] diff --git a/Evaluator_272/mld/models/architectures/temos/textencoder/__init__.py b/Evaluator_272/mld/models/architectures/temos/textencoder/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Evaluator_272/mld/models/architectures/temos/textencoder/distillbert.py b/Evaluator_272/mld/models/architectures/temos/textencoder/distillbert.py new file mode 100644 index 0000000000000000000000000000000000000000..3c9a2053954fc838013e56211bdeb44a851f063e --- /dev/null +++ b/Evaluator_272/mld/models/architectures/temos/textencoder/distillbert.py @@ -0,0 +1,51 @@ +from typing import List, Union, Tuple +import pytorch_lightning as pl + +import torch.nn as nn +import os + +import torch +from torch import Tensor +from torch.distributions.distribution import Distribution + + +class DistilbertEncoderBase(pl.LightningModule): + def __init__(self, modelpath: str, + finetune: bool = False) -> None: + super().__init__() + + from transformers import AutoTokenizer, AutoModel + from transformers import logging + logging.set_verbosity_error() + # Tokenizer + os.environ["TOKENIZERS_PARALLELISM"] = "false" + self.tokenizer = AutoTokenizer.from_pretrained(modelpath) + + # Text model + self.text_model = AutoModel.from_pretrained(modelpath) + # Don't train the model + if not finetune: + self.text_model.training = False + for p in self.text_model.parameters(): + p.requires_grad = False + + # Then configure the model + self.text_encoded_dim = self.text_model.config.hidden_size + + def train(self, mode: bool = True): + self.training = mode + for module in self.children(): + # Don't put the model in + if module == self.text_model and not self.hparams.finetune: + continue + module.train(mode) + return self + + def get_last_hidden_state(self, texts: List[str], + return_mask: bool = False + ) -> Union[Tensor, Tuple[Tensor, Tensor]]: + encoded_inputs = self.tokenizer(texts, return_tensors="pt", padding=True) + output = self.text_model(**encoded_inputs.to(self.text_model.device)) + if not return_mask: + return output.last_hidden_state + return output.last_hidden_state, encoded_inputs.attention_mask.to(dtype=bool) diff --git a/Evaluator_272/mld/models/architectures/temos/textencoder/distillbert_actor.py 
b/Evaluator_272/mld/models/architectures/temos/textencoder/distillbert_actor.py new file mode 100644 index 0000000000000000000000000000000000000000..8c6d4c3ba4d60c08b06b889ff965575520441ef5 --- /dev/null +++ b/Evaluator_272/mld/models/architectures/temos/textencoder/distillbert_actor.py @@ -0,0 +1,91 @@ +from .distillbert import DistilbertEncoderBase +import torch + +from typing import List, Union +from torch import nn, Tensor +from torch.distributions.distribution import Distribution + +from mld.models.operator import PositionalEncoding +from mld.utils.temos_utils import lengths_to_mask + + +class DistilbertActorAgnosticEncoder(DistilbertEncoderBase): + def __init__(self, modelpath: str, + finetune: bool = False, + vae: bool = True, + latent_dim: int = 256, + ff_size: int = 1024, + num_layers: int = 4, num_heads: int = 4, + dropout: float = 0.1, + activation: str = "gelu", **kwargs) -> None: + super().__init__(modelpath=modelpath, finetune=finetune) + self.save_hyperparameters(logger=False) + + encoded_dim = self.text_encoded_dim + # Projection of the text-outputs into the latent space + self.projection = nn.Sequential(nn.ReLU(), + nn.Linear(encoded_dim, latent_dim)) + + # TransformerVAE adapted from ACTOR + # Action agnostic: only one set of params + if vae: + self.mu_token = nn.Parameter(torch.randn(latent_dim)) + self.logvar_token = nn.Parameter(torch.randn(latent_dim)) + else: + self.emb_token = nn.Parameter(torch.randn(latent_dim)) + + self.sequence_pos_encoding = PositionalEncoding(latent_dim, dropout) + + seq_trans_encoder_layer = nn.TransformerEncoderLayer(d_model=latent_dim, + nhead=num_heads, + dim_feedforward=ff_size, + dropout=dropout, + activation=activation) + + self.seqTransEncoder = nn.TransformerEncoder(seq_trans_encoder_layer, + num_layers=num_layers) + + def forward(self, texts: List[str]) -> Union[Tensor, Distribution]: + text_encoded, mask = self.get_last_hidden_state(texts, return_mask=True) + + x = self.projection(text_encoded) + bs, nframes, _ = x.shape + # bs, nframes, totjoints, nfeats = x.shape + # Switch sequence and batch_size because the input of + # Pytorch Transformer is [Sequence, Batch size, ...] 
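+        # Editorial note -- illustrative shapes: DistilBERT's last_hidden_state is
+        # [bs, ntokens, 768] (text_encoded_dim), and self.projection maps it to
+        # [bs, ntokens, latent_dim] before the permute below; ntokens depends on
+        # how the tokenizer pads the batch.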
+        x = x.permute(1, 0, 2)  # now it is [nframes, bs, latent_dim]
+
+        if self.hparams.vae:
+            mu_token = torch.tile(self.mu_token, (bs,)).reshape(bs, -1)
+            logvar_token = torch.tile(self.logvar_token, (bs,)).reshape(bs, -1)
+
+            # adding the distribution tokens for all sequences
+            xseq = torch.cat((mu_token[None], logvar_token[None], x), 0)
+
+            # create a bigger mask, to allow attend to mu and logvar
+            token_mask = torch.ones((bs, 2), dtype=bool, device=x.device)
+            aug_mask = torch.cat((token_mask, mask), 1)
+        else:
+            emb_token = torch.tile(self.emb_token, (bs,)).reshape(bs, -1)
+
+            # adding the embedding token for all sequences
+            xseq = torch.cat((emb_token[None], x), 0)
+
+            # create a bigger mask, to allow attend to emb
+            token_mask = torch.ones((bs, 1), dtype=bool, device=x.device)
+            aug_mask = torch.cat((token_mask, mask), 1)
+
+        # add positional encoding
+        xseq = self.sequence_pos_encoding(xseq)
+        final = self.seqTransEncoder(xseq, src_key_padding_mask=~aug_mask)
+
+        if self.hparams.vae:
+            mu, logvar = final[0], final[1]
+            std = logvar.exp().pow(0.5)
+            # Normal() raises ValueError on invalid parameters; let that propagate
+            # instead of swallowing it, which previously left `dist` unbound.
+            dist = torch.distributions.Normal(mu, std)
+            return dist
+        else:
+            return final[0]
diff --git a/Evaluator_272/mld/models/architectures/tools/embeddings.py b/Evaluator_272/mld/models/architectures/tools/embeddings.py
new file mode 100644
index 0000000000000000000000000000000000000000..632f8c7996314f2a558cbb8e60f687b6f0771fd0
--- /dev/null
+++ b/Evaluator_272/mld/models/architectures/tools/embeddings.py
@@ -0,0 +1,320 @@
+# This file is taken from signjoey repository
+import math
+
+import torch
+from torch import Tensor, nn
+
+
+def freeze_params(module: nn.Module) -> None:
+    # Stand-in for the helper this module originally imported from signjoey
+    # (needed by the `freeze` options below, which would otherwise raise a
+    # NameError): freezing excludes all parameters from gradient updates.
+    for p in module.parameters():
+        p.requires_grad = False
+
+
+def get_activation(activation_type):
+    if activation_type == "relu":
+        return nn.ReLU()
+    elif activation_type == "relu6":
+        return nn.ReLU6()
+    elif activation_type == "prelu":
+        return nn.PReLU()
+    elif activation_type == "selu":
+        return nn.SELU()
+    elif activation_type == "celu":
+        return nn.CELU()
+    elif activation_type == "gelu":
+        return nn.GELU()
+    elif activation_type == "sigmoid":
+        return nn.Sigmoid()
+    elif activation_type == "softplus":
+        return nn.Softplus()
+    elif activation_type == "softshrink":
+        return nn.Softshrink()
+    elif activation_type == "softsign":
+        return nn.Softsign()
+    elif activation_type == "tanh":
+        return nn.Tanh()
+    elif activation_type == "tanhshrink":
+        return nn.Tanhshrink()
+    else:
+        raise ValueError("Unknown activation type {}".format(activation_type))
+
+
+class MaskedNorm(nn.Module):
+    """
+    Original Code from:
+    https://discuss.pytorch.org/t/batchnorm-for-different-sized-samples-in-batch/44251/8
+    """
+
+    def __init__(self, norm_type, num_groups, num_features):
+        super().__init__()
+        self.norm_type = norm_type
+        if self.norm_type == "batch":
+            self.norm = nn.BatchNorm1d(num_features=num_features)
+        elif self.norm_type == "group":
+            self.norm = nn.GroupNorm(num_groups=num_groups, num_channels=num_features)
+        elif self.norm_type == "layer":
+            self.norm = nn.LayerNorm(normalized_shape=num_features)
+        else:
+            raise ValueError("Unsupported Normalization Layer")
+
+        self.num_features = num_features
+
+    def forward(self, x: Tensor, mask: Tensor):
+        if self.training:
+            reshaped = x.reshape([-1, self.num_features])
+            reshaped_mask = mask.reshape([-1, 1]) > 0
+            selected = torch.masked_select(reshaped, reshaped_mask).reshape(
+                [-1, self.num_features]
+            )
+            batch_normed = self.norm(selected)
+            scattered = reshaped.masked_scatter(reshaped_mask, batch_normed)
+            return scattered.reshape([x.shape[0], -1, self.num_features])
+        else:
+            reshaped =
x.reshape([-1, self.num_features]) + batched_normed = self.norm(reshaped) + return batched_normed.reshape([x.shape[0], -1, self.num_features]) + + + +class Embeddings(nn.Module): + + """ + Simple embeddings class + """ + + # pylint: disable=unused-argument + def __init__( + self, + embedding_dim: int = 64, + num_heads: int = 8, + scale: bool = False, + scale_factor: float = None, + norm_type: str = None, + activation_type: str = None, + vocab_size: int = 0, + padding_idx: int = 1, + freeze: bool = False, + **kwargs + ): + """ + Create new embeddings for the vocabulary. + Use scaling for the Transformer. + + :param embedding_dim: + :param scale: + :param vocab_size: + :param padding_idx: + :param freeze: freeze the embeddings during training + """ + super().__init__() + + self.embedding_dim = embedding_dim + self.vocab_size = vocab_size + self.lut = nn.Embedding(vocab_size, self.embedding_dim, padding_idx=padding_idx) + + self.norm_type = norm_type + if self.norm_type: + self.norm = MaskedNorm( + norm_type=norm_type, num_groups=num_heads, num_features=embedding_dim + ) + + self.activation_type = activation_type + if self.activation_type: + self.activation = get_activation(activation_type) + + self.scale = scale + if self.scale: + if scale_factor: + self.scale_factor = scale_factor + else: + self.scale_factor = math.sqrt(self.embedding_dim) + + if freeze: + freeze_params(self) + + # pylint: disable=arguments-differ + def forward(self, x: Tensor, mask: Tensor = None) -> Tensor: + """ + Perform lookup for input `x` in the embedding table. + + :param mask: token masks + :param x: index in the vocabulary + :return: embedded representation for `x` + """ + + x = self.lut(x) + + if self.norm_type: + x = self.norm(x, mask) + + if self.activation_type: + x = self.activation(x) + + if self.scale: + return x * self.scale_factor + else: + return x + + def __repr__(self): + return "%s(embedding_dim=%d, vocab_size=%d)" % ( + self.__class__.__name__, + self.embedding_dim, + self.vocab_size, + ) + + +class SpatialEmbeddings(nn.Module): + + """ + Simple Linear Projection Layer + (For encoder outputs to predict glosses) + """ + + # pylint: disable=unused-argument + def __init__( + self, + embedding_dim: int, + input_size: int, + num_heads: int, + freeze: bool = False, + norm_type: str = "batch", + activation_type: str = "softsign", + scale: bool = False, + scale_factor: float = None, + **kwargs + ): + """ + Create new embeddings for the vocabulary. + Use scaling for the Transformer. 
+ + :param embedding_dim: + :param input_size: + :param freeze: freeze the embeddings during training + """ + super().__init__() + + self.embedding_dim = embedding_dim + self.input_size = input_size + self.ln = nn.Linear(self.input_size, self.embedding_dim) + + self.norm_type = norm_type + if self.norm_type: + self.norm = MaskedNorm( + norm_type=norm_type, num_groups=num_heads, num_features=embedding_dim + ) + + self.activation_type = activation_type + if self.activation_type: + self.activation = get_activation(activation_type) + + self.scale = scale + if self.scale: + if scale_factor: + self.scale_factor = scale_factor + else: + self.scale_factor = math.sqrt(self.embedding_dim) + + if freeze: + freeze_params(self) + + # pylint: disable=arguments-differ + def forward(self, x: Tensor, mask: Tensor) -> Tensor: + """ + :param mask: frame masks + :param x: input frame features + :return: embedded representation for `x` + """ + + x = self.ln(x) + + if self.norm_type: + x = self.norm(x, mask) + + if self.activation_type: + x = self.activation(x) + + if self.scale: + return x * self.scale_factor + else: + return x + + def __repr__(self): + return "%s(embedding_dim=%d, input_size=%d)" % ( + self.__class__.__name__, + self.embedding_dim, + self.input_size, + ) + +def get_timestep_embedding( + timesteps: torch.Tensor, + embedding_dim: int, + flip_sin_to_cos: bool = False, + downscale_freq_shift: float = 1, + scale: float = 1, + max_period: int = 10000, +): + """ + This matches the implementation in Denoising Diffusion Probabilistic Models: Create sinusoidal timestep embeddings. + + :param timesteps: a 1-D Tensor of N indices, one per batch element. + These may be fractional. + :param embedding_dim: the dimension of the output. :param max_period: controls the minimum frequency of the + embeddings. :return: an [N x dim] Tensor of positional embeddings. 
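+
+    Example (illustrative):
+        >>> get_timestep_embedding(torch.tensor([0.]), 4)
+        tensor([[0., 0., 1., 1.]])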
+ """ + assert len(timesteps.shape) == 1, "Timesteps should be a 1d-array" + + half_dim = embedding_dim // 2 + exponent = -math.log(max_period) * torch.arange( + start=0, end=half_dim, dtype=torch.float32, device=timesteps.device + ) + exponent = exponent / (half_dim - downscale_freq_shift) + + emb = torch.exp(exponent) + emb = timesteps[:, None].float() * emb[None, :] + + # scale embeddings + emb = scale * emb + + # concat sine and cosine embeddings + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=-1) + + # flip sine and cosine embeddings + if flip_sin_to_cos: + emb = torch.cat([emb[:, half_dim:], emb[:, :half_dim]], dim=-1) + + # zero pad + if embedding_dim % 2 == 1: + emb = torch.nn.functional.pad(emb, (0, 1, 0, 0)) + return emb + + +class TimestepEmbedding(nn.Module): + def __init__(self, channel: int, time_embed_dim: int, act_fn: str = "silu"): + super().__init__() + + self.linear_1 = nn.Linear(channel, time_embed_dim) + self.act = None + if act_fn == "silu": + self.act = nn.SiLU() + self.linear_2 = nn.Linear(time_embed_dim, time_embed_dim) + + def forward(self, sample): + sample = self.linear_1(sample) + + if self.act is not None: + sample = self.act(sample) + + sample = self.linear_2(sample) + return sample + + +class Timesteps(nn.Module): + def __init__(self, num_channels: int, flip_sin_to_cos: bool, downscale_freq_shift: float): + super().__init__() + self.num_channels = num_channels + self.flip_sin_to_cos = flip_sin_to_cos + self.downscale_freq_shift = downscale_freq_shift + + def forward(self, timesteps): + t_emb = get_timestep_embedding( + timesteps, + self.num_channels, + flip_sin_to_cos=self.flip_sin_to_cos, + downscale_freq_shift=self.downscale_freq_shift, + ) + return t_emb diff --git a/Evaluator_272/mld/models/architectures/tools/transformer_layers.py b/Evaluator_272/mld/models/architectures/tools/transformer_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..741e9f28ee69037fe4d210789ed100c1803e4107 --- /dev/null +++ b/Evaluator_272/mld/models/architectures/tools/transformer_layers.py @@ -0,0 +1,281 @@ +# -*- coding: utf-8 -*- +import math +import torch +import torch.nn as nn +from torch import Tensor + +# Took from https://github.com/joeynmt/joeynmt/blob/fb66afcbe1beef9acd59283bcc084c4d4c1e6343/joeynmt/transformer_layers.py + + +# pylint: disable=arguments-differ +class MultiHeadedAttention(nn.Module): + """ + Multi-Head Attention module from "Attention is All You Need" + + Implementation modified from OpenNMT-py. + https://github.com/OpenNMT/OpenNMT-py + """ + + def __init__(self, num_heads: int, size: int, dropout: float = 0.1): + """ + Create a multi-headed attention layer. + :param num_heads: the number of heads + :param size: model size (must be divisible by num_heads) + :param dropout: probability of dropping a unit + """ + super().__init__() + + assert size % num_heads == 0 + + self.head_size = head_size = size // num_heads + self.model_size = size + self.num_heads = num_heads + + self.k_layer = nn.Linear(size, num_heads * head_size) + self.v_layer = nn.Linear(size, num_heads * head_size) + self.q_layer = nn.Linear(size, num_heads * head_size) + + self.output_layer = nn.Linear(size, size) + self.softmax = nn.Softmax(dim=-1) + self.dropout = nn.Dropout(dropout) + + def forward(self, k: Tensor, v: Tensor, q: Tensor, mask: Tensor = None): + """ + Computes multi-headed attention. + + :param k: keys [B, M, D] with M being the sentence length. 
:param v: values [B, M, D]
+        :param q: query [B, M, D]
+        :param mask: optional mask [B, 1, M] or [B, M, M]
+        :return:
+        """
+        batch_size = k.size(0)
+        num_heads = self.num_heads
+
+        # project the queries (q), keys (k), and values (v)
+        k = self.k_layer(k)
+        v = self.v_layer(v)
+        q = self.q_layer(q)
+
+        # reshape q, k, v for our computation to [batch_size, num_heads, ..]
+        k = k.view(batch_size, -1, num_heads, self.head_size).transpose(1, 2)
+        v = v.view(batch_size, -1, num_heads, self.head_size).transpose(1, 2)
+        q = q.view(batch_size, -1, num_heads, self.head_size).transpose(1, 2)
+
+        # compute scores
+        q = q / math.sqrt(self.head_size)
+
+        # batch x num_heads x query_len x key_len
+        scores = torch.matmul(q, k.transpose(2, 3))
+        # torch.Size([48, 8, 183, 183])
+
+        # apply the mask (if we have one)
+        # we add a dimension for the heads to it below: [B, 1, 1, M]
+        if mask is not None:
+            scores = scores.masked_fill(~mask.unsqueeze(1), float('-inf'))
+
+        # apply attention dropout and compute context vectors.
+        attention = self.softmax(scores)
+        attention = self.dropout(attention)
+        # torch.Size([48, 8, 183, 183])  [bs, nheads, time, time] (for decoding)
+
+        # v: torch.Size([48, 8, 183, 32]) (32 is 256/8)
+        # get context vector (select values with attention) and reshape
+        # back to [B, M, D]
+        context = torch.matmul(attention, v)  # torch.Size([48, 8, 183, 32])
+        context = context.transpose(1, 2).contiguous().view(
+            batch_size, -1, num_heads * self.head_size)
+        # torch.Size([48, 183, 256]) put back to 256 (combine the heads)
+
+        output = self.output_layer(context)
+        # torch.Size([48, 183, 256]): 1 output per time step
+
+        return output
+
+
+# pylint: disable=arguments-differ
+class PositionwiseFeedForward(nn.Module):
+    """
+    Position-wise Feed-forward layer
+    Projects to ff_size and then back down to input_size.
+    """
+
+    def __init__(self, input_size, ff_size, dropout=0.1):
+        """
+        Initializes position-wise feed-forward layer.
+        :param input_size: dimensionality of the input.
+        :param ff_size: dimensionality of intermediate representation
+        :param dropout:
+        """
+        super().__init__()
+        self.layer_norm = nn.LayerNorm(input_size, eps=1e-6)
+        self.pwff_layer = nn.Sequential(
+            nn.Linear(input_size, ff_size),
+            nn.ReLU(),
+            nn.Dropout(dropout),
+            nn.Linear(ff_size, input_size),
+            nn.Dropout(dropout),
+        )
+
+    def forward(self, x):
+        x_norm = self.layer_norm(x)
+        return self.pwff_layer(x_norm) + x
+
+
+# pylint: disable=arguments-differ
+class PositionalEncoding(nn.Module):
+    """
+    Pre-compute position encodings (PE).
+    In forward pass, this adds the position-encodings to the
+    input for as many time steps as necessary.
+
+    Implementation based on OpenNMT-py.
+    https://github.com/OpenNMT/OpenNMT-py
+    """
+
+    def __init__(self,
+                 size: int = 0,
+                 max_len: int = 5000):
+        """
+        Positional Encoding with maximum length max_len
+        :param size:
+        :param max_len:
+        """
+        if size % 2 != 0:
+            raise ValueError("Cannot use sin/cos positional encoding with "
+                             "odd dim (got dim={:d})".format(size))
+        pe = torch.zeros(max_len, size)
+        position = torch.arange(0, max_len).unsqueeze(1)
+        div_term = torch.exp((torch.arange(0, size, 2, dtype=torch.float) *
+                              -(math.log(10000.0) / size)))
+        pe[:, 0::2] = torch.sin(position.float() * div_term)
+        pe[:, 1::2] = torch.cos(position.float() * div_term)
+        pe = pe.unsqueeze(0)  # shape: [1, max_len, size]
+        super().__init__()
+        self.register_buffer('pe', pe)
+        self.dim = size
+
+    def forward(self, emb):
+        """Embed inputs.
+        Args:
+            emb (FloatTensor): Sequence of word vectors
+                ``(batch_size, seq_len, self.dim)``
+        """
+        # Add position encodings
+        return emb + self.pe[:, :emb.size(1)]
+
+
+class TransformerEncoderLayer(nn.Module):
+    """
+    One Transformer encoder layer has a Multi-head attention layer plus
+    a position-wise feed-forward layer.
+    """
+
+    def __init__(self,
+                 size: int = 0,
+                 ff_size: int = 0,
+                 num_heads: int = 0,
+                 dropout: float = 0.1):
+        """
+        A single Transformer layer.
+        :param size:
+        :param ff_size:
+        :param num_heads:
+        :param dropout:
+        """
+        super().__init__()
+
+        self.layer_norm = nn.LayerNorm(size, eps=1e-6)
+        self.src_src_att = MultiHeadedAttention(num_heads, size,
+                                                dropout=dropout)
+        self.feed_forward = PositionwiseFeedForward(size, ff_size=ff_size,
+                                                    dropout=dropout)
+        self.dropout = nn.Dropout(dropout)
+        self.size = size
+
+    # pylint: disable=arguments-differ
+    def forward(self, x: Tensor, mask: Tensor) -> Tensor:
+        """
+        Forward pass for a single transformer encoder layer.
+        First applies layer norm, then self attention,
+        then dropout with residual connection (adding the input to the result),
+        and then a position-wise feed-forward layer.
+
+        :param x: layer input
+        :param mask: input mask
+        :return: output tensor
+        """
+        x_norm = self.layer_norm(x)
+        h = self.src_src_att(x_norm, x_norm, x_norm, mask)
+        h = self.dropout(h) + x
+        o = self.feed_forward(h)
+        return o
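+
+# Editorial sketch (hypothetical values) -- a quick smoke test of the encoder layer above:
+#     layer = TransformerEncoderLayer(size=256, ff_size=1024, num_heads=8)
+#     x = torch.randn(48, 183, 256)                    # [batch, time, size]
+#     mask = torch.ones(48, 1, 183, dtype=torch.bool)  # [B, 1, M], broadcast over heads
+#     out = layer(x, mask)                             # -> [48, 183, 256]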
+
+
+class TransformerDecoderLayer(nn.Module):
+    """
+    Transformer decoder layer.
+
+    Consists of self-attention, source-attention, and feed-forward.
+    """
+
+    def __init__(self,
+                 size: int = 0,
+                 ff_size: int = 0,
+                 num_heads: int = 0,
+                 dropout: float = 0.1):
+        """
+        Represents a single Transformer decoder layer.
+
+        It attends to the source representation and the previous decoder states.
+
+        :param size: model dimensionality
+        :param ff_size: size of the feed-forward intermediate layer
+        :param num_heads: number of heads
+        :param dropout: dropout to apply to input
+        """
+        super().__init__()
+        self.size = size
+
+        self.trg_trg_att = MultiHeadedAttention(num_heads, size,
+                                                dropout=dropout)
+        self.src_trg_att = MultiHeadedAttention(num_heads, size,
+                                                dropout=dropout)
+
+        self.feed_forward = PositionwiseFeedForward(size, ff_size=ff_size,
+                                                    dropout=dropout)
+
+        self.x_layer_norm = nn.LayerNorm(size, eps=1e-6)
+        self.dec_layer_norm = nn.LayerNorm(size, eps=1e-6)
+
+        self.dropout = nn.Dropout(dropout)
+
+    # pylint: disable=arguments-differ
+    def forward(self,
+                x: Tensor = None,
+                memory: Tensor = None,
+                src_mask: Tensor = None,
+                trg_mask: Tensor = None) -> Tensor:
+        """
+        Forward pass of a single Transformer decoder layer.
+
+        :param x: inputs
+        :param memory: source representations
+        :param src_mask: source mask
+        :param trg_mask: target mask (so as to not condition on future steps)
+        :return: output tensor
+        """
+        # decoder/target self-attention
+        x_norm = self.x_layer_norm(x)  # torch.Size([48, 183, 256])
+        h1 = self.trg_trg_att(x_norm, x_norm, x_norm, mask=trg_mask)
+        h1 = self.dropout(h1) + x
+
+        # source-target attention
+        h1_norm = self.dec_layer_norm(h1)  # torch.Size([48, 183, 256]) (same for memory)
+        h2 = self.src_trg_att(memory, memory, h1_norm, mask=src_mask)
+
+        # final position-wise feed-forward layer
+        o = self.feed_forward(self.dropout(h2) + h1)
+
+        return o
diff --git a/Evaluator_272/mld/models/architectures/vision_transformer.py b/Evaluator_272/mld/models/architectures/vision_transformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..d47cf9a4bb7f0e4d6092b55a798f5baec4b7228c
--- /dev/null
+++ b/Evaluator_272/mld/models/architectures/vision_transformer.py
@@ -0,0 +1,954 @@
+"""
+This script is borrowed from https://github.com/rwightman/pytorch-image-models.
+Adhere to their licence to use this script
+
+We hacked it a little bit to make it happy in our framework.
+"""
+import torch
+import torch.nn as nn
+import torch.utils.model_zoo as model_zoo
+import math
+import warnings
+import random
+import numpy as np
+import joblib
+
+from collections import OrderedDict
+from functools import partial
+from itertools import repeat
+# from torch._six import container_abcs
+
+from mld.utils.maed_utils import DropPath, determine_output_feature_dim, load_state_dict
+from mld.models.architectures.hrnet import get_hrnet
+from mld.models.architectures.resnetv2 import ResNetV2
+from .ghost_nas_network import get_ghostnas
+from .ghost_nas_network_tiny import get_ghostnas as get_ghostnas_tiny
+# from torchvision.models.utils import load_state_dict_from_url
+
+model_urls = {
+    'vit_tiny_patch16_224':
+    'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth',
+    'vit_small_patch16_224':
+    'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/vit_small_p16_224-15ec54c9.pth',
+    'vit_base_patch16_224':
+    'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth',
+    'vit_base_patch16_384':
+    'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_384-83fb41ba.pth',
+    'vit_base_patch32_384':
+    'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p32_384-830016f5.pth',
+    'vit_large_patch16_224':
+    'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p16_224-4ee7a4dc.pth',
+    'vit_large_patch16_384':
+    'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p16_384-b3be5167.pth',
+    'vit_large_patch32_384':
+    'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p32_384-9b920ba8.pth',
+    'vit_base_resnet50_224_in21k':
+    'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_224_in21k-6f7c7740.pth',
+}
+
+
+def _no_grad_trunc_normal_(tensor, mean, std, a, b):
+    # Cut & paste from PyTorch official master until it's in a few official releases - RW
+    # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
+    def norm_cdf(x):
+        # Computes standard normal cumulative distribution function
+        return (1. + math.erf(x / math.sqrt(2.))) / 2.
+
+    if (mean < a - 2 * std) or (mean > b + 2 * std):
+        warnings.warn(
+            "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
+            "The distribution of values may be incorrect.",
+            stacklevel=2)
+
+    with torch.no_grad():
+        # Values are generated by using a truncated uniform distribution and
+        # then using the inverse CDF for the normal distribution.
+        # Get upper and lower cdf values
+        l = norm_cdf((a - mean) / std)
+        u = norm_cdf((b - mean) / std)
+
+        # Uniformly fill tensor with values from [l, u], then translate to
+        # [2l-1, 2u-1].
+        tensor.uniform_(2 * l - 1, 2 * u - 1)
+
+        # Use inverse cdf transform for normal distribution to get truncated
+        # standard normal
+        tensor.erfinv_()
+
+        # Transform to proper mean, std
+        tensor.mul_(std * math.sqrt(2.))
+        tensor.add_(mean)
+
+        # Clamp to ensure it's in the proper range
+        tensor.clamp_(min=a, max=b)
+        return tensor
+
+
+def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
+    # type: (Tensor, float, float, float, float) -> Tensor
+    r"""Fills the input Tensor with values drawn from a truncated
+    normal distribution. The values are effectively drawn from the
+    normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
+    with values outside :math:`[a, b]` redrawn until they are within
+    the bounds. The method used for generating the random values works
+    best when :math:`a \leq \text{mean} \leq b`.
+ Args: + tensor: an n-dimensional `torch.Tensor` + mean: the mean of the normal distribution + std: the standard deviation of the normal distribution + a: the minimum cutoff value + b: the maximum cutoff value + Examples: + >>> w = torch.empty(3, 5) + >>> nn.init.trunc_normal_(w) + """ + return _no_grad_trunc_normal_(tensor, mean, std, a, b) + + +class Mlp(nn.Module): + + def __init__(self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(nn.Module): + + def __init__(self, + dim, + num_heads=8, + qkv_bias=False, + qk_scale=None, + attn_drop=0., + proj_drop=0., + st_mode='vanilla'): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights + self.scale = qk_scale or head_dim**-0.5 + + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.mode = st_mode + if self.mode == 'parallel': + self.ts_attn = nn.Linear(dim * 2, dim * 2) + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + else: + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.proj_drop = nn.Dropout(proj_drop) + + self.attn_count_s = None + self.attn_count_t = None + + def forward(self, x, seqlen=1): + B, N, C = x.shape + + if self.mode == 'series': + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, + C // self.num_heads).permute( + 2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[ + 2] # make torchscript happy (cannot use tensor as tuple) + x = self.forward_spatial(q, k, v) + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, + C // self.num_heads).permute( + 2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[ + 2] # make torchscript happy (cannot use tensor as tuple) + x = self.forward_temporal(q, k, v, seqlen=seqlen) + elif self.mode == 'parallel': + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, + C // self.num_heads).permute( + 2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[ + 2] # make torchscript happy (cannot use tensor as tuple) + x_t = self.forward_temporal(q, k, v, seqlen=seqlen) + x_s = self.forward_spatial(q, k, v) + + alpha = torch.cat([x_s, x_t], dim=-1) + alpha = alpha.mean(dim=1, keepdim=True) + alpha = self.ts_attn(alpha).reshape(B, 1, C, 2) + alpha = alpha.softmax(dim=-1) + #self.count_attn(alpha) + + x = x_t * alpha[:, :, :, 1] + x_s * alpha[:, :, :, 0] + #x = (x_t + x_s) / 2 + elif self.mode == 'coupling': + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, + C // self.num_heads).permute( + 2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[ + 2] # make torchscript happy (cannot use tensor as tuple) + x = self.forward_coupling(q, k, v, seqlen=seqlen) + elif self.mode == 'vanilla': + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, + C // self.num_heads).permute( + 2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[ + 2] # make torchscript happy (cannot use tensor as tuple) + x = self.forward_spatial(q, k, v) + elif self.mode == 'temporal': + x = x.mean(dim=1, keepdim=True) + N = 1 + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, + C // self.num_heads).permute( + 2, 0, 3, 1, 4) + q, k, v = 
qkv[0], qkv[1], qkv[ + 2] # make torchscript happy (cannot use tensor as tuple) + x = self.forward_temporal(q, k, v, seqlen=seqlen) + else: + raise NotImplementedError(self.mode) + x = self.proj(x) + x = self.proj_drop(x) + return x + + def reshape_T(self, x, seqlen=1, inverse=False): + if not inverse: + N, C = x.shape[-2:] + x = x.reshape(-1, seqlen, self.num_heads, N, C).transpose(1, 2) + x = x.reshape(-1, self.num_heads, seqlen * N, C) #(B, H, TN, c) + else: + TN, C = x.shape[-2:] + x = x.reshape(-1, self.num_heads, seqlen, TN // seqlen, + C).transpose(1, 2) + x = x.reshape(-1, self.num_heads, TN // seqlen, C) #(BT, H, N, C) + return x + + def forward_coupling(self, q, k, v, seqlen=8): + BT, _, N, C = q.shape + q = self.reshape_T(q, seqlen) + k = self.reshape_T(k, seqlen) + v = self.reshape_T(v, seqlen) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = attn @ v + x = self.reshape_T(x, seqlen, inverse=True) + x = x.transpose(1, 2).reshape(BT, N, C * self.num_heads) + return x + + def forward_spatial(self, q, k, v): + B, _, N, C = q.shape + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = attn @ v + x = x.transpose(1, 2).reshape(B, N, C * self.num_heads) + return x + + def forward_temporal(self, q, k, v, seqlen=8): + B, _, N, C = q.shape + qt = q.reshape(-1, seqlen, self.num_heads, N, + C).permute(0, 2, 3, 1, 4) #(B, H, N, T, C) + kt = k.reshape(-1, seqlen, self.num_heads, N, + C).permute(0, 2, 3, 1, 4) #(B, H, N, T, C) + vt = v.reshape(-1, seqlen, self.num_heads, N, + C).permute(0, 2, 3, 1, 4) #(B, H, N, T, C) + + attn = (qt @ kt.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = attn @ vt #(B, H, N, T, C) + x = x.permute(0, 3, 2, 1, 4).reshape(B, N, C * self.num_heads) + return x + + def count_attn(self, attn): + attn = attn.detach().cpu().numpy() + attn = attn.mean(axis=1) + attn_t = attn[:, :, 1].mean(axis=1) + attn_s = attn[:, :, 0].mean(axis=1) + if self.attn_count_s is None: + self.attn_count_s = attn_s + self.attn_count_t = attn_t + else: + self.attn_count_s = np.concatenate([self.attn_count_s, attn_s], + axis=0) + self.attn_count_t = np.concatenate([self.attn_count_t, attn_t], + axis=0) + + +class Block(nn.Module): + + def __init__(self, + dim, + num_heads, + mlp_ratio=4., + qkv_bias=False, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + st_mode='vanilla'): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention(dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop, + st_mode=st_mode) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath( + drop_path) if drop_path > 0. 
else nn.Identity()
+        self.norm2 = norm_layer(dim)
+        mlp_hidden_dim = int(dim * mlp_ratio)
+        self.mlp = Mlp(in_features=dim,
+                       hidden_features=mlp_hidden_dim,
+                       act_layer=act_layer,
+                       drop=drop)
+
+    def forward(self, x, seqlen=1):
+        x = x + self.drop_path(self.attn(self.norm1(x), seqlen))
+        x = x + self.drop_path(self.mlp(self.norm2(x)))
+        return x
+
+
+class PatchEmbed(nn.Module):
+    """ Image to Patch Embedding
+    """
+
+    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
+        super().__init__()
+        img_size = tuple(repeat(img_size, 2))
+        patch_size = tuple(repeat(patch_size, 2))
+        num_patches = (img_size[1] // patch_size[1]) * (img_size[0] //
+                                                        patch_size[0])
+        self.img_size = img_size
+        self.patch_size = patch_size
+        self.num_patches = num_patches
+
+        self.proj = nn.Conv2d(in_chans,
+                              embed_dim,
+                              kernel_size=patch_size,
+                              stride=patch_size)
+
+    def forward(self, x):
+        B, C, H, W = x.shape
+        # FIXME look at relaxing size constraints
+        assert H == self.img_size[0] and W == self.img_size[1], \
+            f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
+        x = self.proj(x).flatten(2).transpose(1, 2)
+        return x
+
+
+class HybridEmbed(nn.Module):
+    """ CNN Feature Map Embedding
+    Extract feature map from CNN, flatten, project to embedding dim.
+    """
+
+    def __init__(self,
+                 backbone,
+                 img_size=224,
+                 feature_size=None,
+                 in_chans=3,
+                 embed_dim=768):
+        super().__init__()
+        assert isinstance(backbone, nn.Module)
+        img_size = tuple(repeat(img_size, 2))
+        self.img_size = img_size
+        self.backbone = backbone
+        if feature_size is None:
+            feature_size, feature_dim = determine_output_feature_dim(
+                inp_size=(1, in_chans, img_size[0], img_size[1]),
+                model=self.backbone)
+        else:
+            # 2-tuple (H, W); the original `repeat(feature_size, n)` referenced an
+            # undefined name `n` and would raise a NameError on this branch.
+            feature_size = tuple(repeat(feature_size, 2))
+            feature_dim = self.backbone.feature_info.channels()[-1]
+        self.num_patches = feature_size[0] * feature_size[1]
+        self.proj = nn.Conv2d(feature_dim, embed_dim, 1)
+
+    def forward(self, x):
+        x = self.backbone(x)
+        if isinstance(x, (list, tuple)):
+            x = x[
+                -1]  # last feature if backbone outputs list/tuple of features
+        x = self.proj(x).flatten(2).transpose(1, 2)
+        return x
+
+
+class VisionTransformer(nn.Module):
+    """ Vision Transformer with support for patch or hybrid CNN input stage
+    """
+
+    def __init__(self,
+                 img_size=224,
+                 patch_size=16,
+                 in_chans=3,
+                 num_classes=1000,
+                 embed_dim=768,
+                 depth=12,
+                 num_heads=12,
+                 mlp_ratio=4.,
+                 qkv_bias=False,
+                 qk_scale=None,
+                 representation_size=None,
+                 drop_rate=0.,
+                 attn_drop_rate=0.,
+                 drop_path_rate=0.,
+                 hybrid_backbone=None,
+                 norm_layer=nn.LayerNorm,
+                 st_mode='vanilla'):
+        super().__init__()
+        self.num_classes = num_classes
+        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
+
+        if hybrid_backbone is not None:
+            self.patch_embed = HybridEmbed(hybrid_backbone,
+                                           img_size=img_size,
+                                           in_chans=in_chans,
+                                           embed_dim=embed_dim)
+        else:
+            self.patch_embed = PatchEmbed(img_size=img_size,
+                                          patch_size=patch_size,
+                                          in_chans=in_chans,
+                                          embed_dim=embed_dim)
+        num_patches = self.patch_embed.num_patches
+
+        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
+        self.pos_embed = nn.Parameter(
+            torch.zeros(1, num_patches + 1, embed_dim))
+        self.pos_drop = nn.Dropout(p=drop_rate)
+
+        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)
+               ]  # stochastic depth decay rule
+        self.blocks = nn.ModuleList([
+            Block(dim=embed_dim,
+                  num_heads=num_heads,
+                  mlp_ratio=mlp_ratio,
+                  qkv_bias=qkv_bias,
+                  qk_scale=qk_scale,
drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + st_mode=st_mode) for i in range(depth) + ]) + self.norm = norm_layer(embed_dim) + self.st_mode = st_mode + + # Representation layer + if representation_size: + self.num_features = representation_size + self.pre_logits = nn.Sequential( + OrderedDict([('fc', nn.Linear(embed_dim, representation_size)), + ('act', nn.Tanh())])) + else: + self.pre_logits = nn.Identity() + + # Classifier head + self.head = nn.Linear( + embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + trunc_normal_(self.pos_embed, std=.02) + trunc_normal_(self.cls_token, std=.02) + + if st_mode in ['coupling', 'parallel', 'series']: + self.temp_embed = nn.Parameter(torch.zeros(1, 16, 1, embed_dim)) + trunc_normal_(self.temp_embed, std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear( + self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x, seqlen=1): + B = x.shape[0] + x = self.patch_embed(x) + + cls_tokens = self.cls_token.expand( + B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks + x = torch.cat((cls_tokens, x), dim=1) + x = x + self.pos_embed + + if self.st_mode in ['coupling', 'parallel', 'series']: + _, N, C = x.shape + x = x.reshape(-1, seqlen, N, C) + self.temp_embed[:, :seqlen, :, :] + x = x.reshape(B, N, C) + + x = self.pos_drop(x) + + for blk in self.blocks: + x = blk(x, seqlen) + + x = self.norm(x)[:, 0] + x = self.pre_logits(x) + return x + + def forward(self, x, seqlen=1): + x = self.forward_features(x, seqlen) + x = self.head(x) + return x + + +def _conv_filter(state_dict, patch_size=16): + """ convert patch embedding weight from manual patchify + linear proj to conv""" + out_dict = {} + for k, v in state_dict.items(): + if 'patch_embed.proj.weight' in k and len(v.shape) < 4: + v = v.reshape((v.shape[0], 3, patch_size, patch_size)) + out_dict[k] = v + return out_dict + + +def vit_small_patch16_224(pretrained=False, strict=True, **kwargs): + if pretrained: + # NOTE my scale was wrong for original weights, leaving this here until I have better ones for this model + kwargs.setdefault('qk_scale', 768**-0.5) + model = VisionTransformer(patch_size=16, + embed_dim=768, + depth=8, + num_heads=8, + mlp_ratio=3., + **kwargs) + if pretrained: + state_dict = model_zoo.load_url(model_urls['vit_small_patch16_224'], + progress=False, + map_location='cpu') + state_dict = _conv_filter(state_dict) + if kwargs['num_classes'] != 1000: + del state_dict['head.weight'] + del state_dict['head.bias'] + model.load_state_dict(state_dict, strict=strict) + return model + + +def vit_base_patch16_224(pretrained=False, strict=True, **kwargs): + model = VisionTransformer(patch_size=16, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs) + if pretrained: + state_dict = model_zoo.load_url(model_urls['vit_base_patch16_224'], + progress=False, + map_location='cpu') + state_dict = 
_conv_filter(state_dict)
+        if kwargs['num_classes'] != 1000:
+            del state_dict['head.weight']
+            del state_dict['head.bias']
+        model.load_state_dict(state_dict, strict=strict)
+    return model
+
+
+def vit_base_patch16_384(pretrained=False, strict=True, **kwargs):
+    model = VisionTransformer(img_size=384,
+                              patch_size=16,
+                              embed_dim=768,
+                              depth=12,
+                              num_heads=12,
+                              mlp_ratio=4,
+                              qkv_bias=True,
+                              norm_layer=partial(nn.LayerNorm, eps=1e-6),
+                              **kwargs)
+    if pretrained:
+        state_dict = model_zoo.load_url(model_urls['vit_base_patch16_384'],
+                                        progress=False,
+                                        map_location='cpu')
+        if kwargs['num_classes'] != 1000:
+            del state_dict['head.weight']
+            del state_dict['head.bias']
+        model.load_state_dict(state_dict, strict=strict)
+    return model
+
+
+def vit_base_patch32_384(pretrained=False, strict=True, **kwargs):
+    model = VisionTransformer(img_size=384,
+                              patch_size=32,
+                              embed_dim=768,
+                              depth=12,
+                              num_heads=12,
+                              mlp_ratio=4,
+                              qkv_bias=True,
+                              norm_layer=partial(nn.LayerNorm, eps=1e-6),
+                              **kwargs)
+    if pretrained:
+        state_dict = model_zoo.load_url(model_urls['vit_base_patch32_384'],
+                                        progress=False,
+                                        map_location='cpu')
+        if kwargs['num_classes'] != 1000:
+            del state_dict['head.weight']
+            del state_dict['head.bias']
+        model.load_state_dict(state_dict, strict=strict)
+    return model
+
+
+def vit_large_patch16_224(pretrained=False, strict=True, **kwargs):
+    model = VisionTransformer(patch_size=16,
+                              embed_dim=1024,
+                              depth=24,
+                              num_heads=16,
+                              mlp_ratio=4,
+                              qkv_bias=True,
+                              norm_layer=partial(nn.LayerNorm, eps=1e-6),
+                              **kwargs)
+    if pretrained:
+        state_dict = model_zoo.load_url(model_urls['vit_large_patch16_224'],
+                                        progress=False,
+                                        map_location='cpu')
+        if kwargs['num_classes'] != 1000:
+            del state_dict['head.weight']
+            del state_dict['head.bias']
+        model.load_state_dict(state_dict, strict=strict)
+    return model
+
+
+def vit_large_patch16_384(pretrained=False, strict=True, **kwargs):
+    model = VisionTransformer(img_size=384,
+                              patch_size=16,
+                              embed_dim=1024,
+                              depth=24,
+                              num_heads=16,
+                              mlp_ratio=4,
+                              qkv_bias=True,
+                              norm_layer=partial(nn.LayerNorm, eps=1e-6),
+                              **kwargs)
+    if pretrained:
+        state_dict = model_zoo.load_url(model_urls['vit_large_patch16_384'],
+                                        progress=False,
+                                        map_location='cpu')
+        if kwargs['num_classes'] != 1000:
+            del state_dict['head.weight']
+            del state_dict['head.bias']
+        model.load_state_dict(state_dict, strict=strict)
+    return model
+
+
+def vit_large_patch32_384(pretrained=False, strict=True, **kwargs):
+    model = VisionTransformer(img_size=384,
+                              patch_size=32,
+                              embed_dim=1024,
+                              depth=24,
+                              num_heads=16,
+                              mlp_ratio=4,
+                              qkv_bias=True,
+                              norm_layer=partial(nn.LayerNorm, eps=1e-6),
+                              **kwargs)
+    if pretrained:
+        state_dict = model_zoo.load_url(model_urls['vit_large_patch32_384'],
+                                        progress=False,
+                                        map_location='cpu')
+        if kwargs['num_classes'] != 1000:
+            del state_dict['head.weight']
+            del state_dict['head.bias']
+        model.load_state_dict(state_dict, strict=strict)
+    return model
+
+
+def vit_huge_patch16_224(pretrained=False, **kwargs):
+    model = VisionTransformer(patch_size=16,
+                              embed_dim=1280,
+                              depth=32,
+                              num_heads=16,
+                              mlp_ratio=4,
+                              **kwargs)
+    # No pretrained weights are wired up for the huge variants in this trimmed file:
+    # the original `model.default_cfg = default_cfgs[...]` assignment referenced the
+    # timm `default_cfgs` table, which is not defined here and would raise a NameError.
+    return model
+
+
+def vit_huge_patch32_384(pretrained=False, **kwargs):
+    model = VisionTransformer(img_size=384,
+                              patch_size=32,
+                              embed_dim=1280,
+                              depth=32,
+                              num_heads=16,
+                              mlp_ratio=4,
+                              **kwargs)
+    # See the note in vit_huge_patch16_224: no pretrained config is available here.
+    return model
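+
+# Editorial sketch (hypothetical usage of the factories above): `num_classes` must be passed
+# explicitly, since the pretrained branches index kwargs['num_classes'] directly:
+#     model = vit_base_patch16_224(pretrained=False, num_classes=1000)
+#     logits = model(torch.randn(1, 3, 224, 224))  # -> [1, 1000]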
+
+
+def vit_base_resnet50_224_in21k(pretrained=False, strict=True,
+                                **kwargs):
+    """ R50+ViT-B/16 hybrid model from original paper (https://arxiv.org/abs/2010.11929).
+    ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
+    """
+    # create a ResNetV2 w/o pre-activation, that uses StdConv and GroupNorm and has 3 stages, no head
+    backbone = ResNetV2(layers=(3, 4, 9),
+                        num_classes=0,
+                        global_pool='',
+                        in_chans=kwargs.get('in_chans', 3),
+                        preact=False,
+                        stem_type='same')
+    model = VisionTransformer(patch_size=16,
+                              embed_dim=768,
+                              depth=12,
+                              num_heads=12,
+                              hybrid_backbone=backbone,
+                              mlp_ratio=4,
+                              qkv_bias=True,
+                              representation_size=768,
+                              norm_layer=partial(nn.LayerNorm, eps=1e-6),
+                              **kwargs)
+    if pretrained:
+        state_dict = model_zoo.load_url(
+            model_urls['vit_base_resnet50_224_in21k'],
+            progress=False,
+            map_location='cpu')
+        state_dict = _conv_filter(state_dict)
+        if kwargs['num_classes'] != 1000:
+            del state_dict['head.weight']
+            del state_dict['head.bias']
+        model.load_state_dict(state_dict, strict=strict)
+    return model
+
+
+def vit_custom_resnet50_224_in21k(num_blocks,
+                                  num_heads,
+                                  st_mode,
+                                  pretrained=True,
+                                  **kwargs):
+    """ Hybrid model with an R50 backbone and a ViT with a custom number of layers.
+    """
+    # create a ResNetV2 w/o pre-activation, that uses StdConv and GroupNorm and has 3 stages, no head
+    backbone = ResNetV2(layers=(3, 4, 9),
+                        num_classes=0,
+                        global_pool='',
+                        in_chans=kwargs.get('in_chans', 3),
+                        preact=False,
+                        stem_type='same')
+    model = VisionTransformer(patch_size=16,
+                              embed_dim=768,
+                              depth=num_blocks,
+                              num_heads=num_heads,
+                              hybrid_backbone=backbone,
+                              mlp_ratio=4,
+                              qkv_bias=True,
+                              representation_size=768,
+                              norm_layer=partial(nn.LayerNorm, eps=1e-6),
+                              st_mode=st_mode,
+                              **kwargs)
+    if pretrained:
+        state_dict = model_zoo.load_url(
+            model_urls['vit_base_resnet50_224_in21k'],
+            progress=False,
+            map_location='cpu')
+        state_dict = _conv_filter(state_dict)
+        del state_dict['head.weight']
+        del state_dict['head.bias']
+        model.load_state_dict(state_dict, strict=False)
+    return model
+
+
+def vit_custom_resnet50_320_in21k(image_size,
+                                  num_blocks,
+                                  num_heads,
+                                  st_mode,
+                                  pretrained=True,
+                                  **kwargs):
+    """ Hybrid model with an R50 backbone and a ViT with a custom number of layers.
+    """
+
+    # create a ResNetV2 w/o pre-activation, that uses StdConv and GroupNorm and has 3 stages, no head
+    backbone = ResNetV2(layers=(3, 4, 9),
+                        num_classes=0,
+                        global_pool='',
+                        in_chans=kwargs.get('in_chans', 3),
+                        preact=False,
+                        stem_type='same')
+    model = VisionTransformer(img_size=image_size,
+                              patch_size=16,
+                              embed_dim=768,
+                              depth=num_blocks,
+                              num_heads=num_heads,
+                              hybrid_backbone=backbone,
+                              mlp_ratio=4,
+                              qkv_bias=True,
+                              representation_size=768,
+                              norm_layer=partial(nn.LayerNorm, eps=1e-6),
+                              st_mode=st_mode,
+                              **kwargs)
+    if pretrained:
+        state_dict = model_zoo.load_url(
+            model_urls['vit_base_resnet50_224_in21k'],
+            progress=False,
+            map_location='cpu')
+        state_dict = _conv_filter(state_dict)
+        del state_dict['head.weight']
+        del state_dict['head.bias']
+        del state_dict['pos_embed']
+        model.load_state_dict(state_dict, strict=False)
+    return model
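+
+# Editorial note: `st_mode` selects how the Attention blocks mix space and time --
+# 'vanilla' attends over patch tokens only, 'temporal' mean-pools tokens before attending
+# over frames, 'series' and 'parallel' combine both, and 'coupling' attends jointly over
+# all frames' tokens (see Attention.forward above).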
+ """ + # create a ResNetV2 w/o pre-activation, that uses StdConv and GroupNorm and has 3 stages, no head + if tiny: + backbone = get_ghostnas_tiny(flops=170) + else: + backbone = get_ghostnas(flops=170) + model = VisionTransformer(patch_size=16, + embed_dim=embed_dim, + depth=num_blocks, + num_heads=num_heads, + hybrid_backbone=backbone, + mlp_ratio=4, + qkv_bias=True, + representation_size=embed_dim, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + st_mode=st_mode, + **kwargs) + if pretrained: + # state_dict = model_zoo.load_url(model_urls['vit_base_resnet50_224_in21k'], progress=False, map_location='cpu') + PRETRAINED = "/apdcephfs/share_1227775/sylvainliu/data/smpldatas/ghostnas_170M_pretrain_1141226.pth" + state_dict = torch.load(PRETRAINED) + state_dict = _conv_filter(state_dict) + # del state_dict['head.weight'] + # del state_dict['head.bias'] + model.patch_embed.load_state_dict(state_dict, strict=False) + return model + + +def vit_custom_hrnet48_224_in21k(image_size, + num_blocks, + num_heads, + st_mode, + pretrained=True, + **kwargs): + """ Hybrid model with a R50 and a Vit of custom layers . + """ + + # create a ResNetV2 w/o pre-activation, that uses StdConv and GroupNorm and has 3 stages, no head + backbone = get_hrnet(model_type='hrnet48', input_size=224, pretrained=True) + + model = VisionTransformer(img_size=image_size, + patch_size=16, + embed_dim=768, + depth=num_blocks, + num_heads=num_heads, + hybrid_backbone=backbone, + mlp_ratio=4, + qkv_bias=True, + representation_size=768, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + st_mode=st_mode, + **kwargs) + if pretrained: + state_dict = model_zoo.load_url( + model_urls['vit_base_resnet50_224_in21k'], + progress=False, + map_location='cpu') + state_dict = _conv_filter(state_dict) + model = load_state_dict(model, state_dict) + return model + + +def vit_custom_hrnet48_320_in21k(image_size, + num_blocks, + num_heads, + st_mode, + pretrained=True, + **kwargs): + """ Hybrid model with a R50 and a Vit of custom layers . 
+ """ + + # create an HRNet-W48 backbone with its own ImageNet-pretrained weights, no head + backbone = get_hrnet(model_type='hrnet48', input_size=320, pretrained=True) + + model = VisionTransformer(img_size=image_size, + patch_size=16, + embed_dim=768, + depth=num_blocks, + num_heads=num_heads, + hybrid_backbone=backbone, + mlp_ratio=4, + qkv_bias=True, + representation_size=768, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + st_mode=st_mode, + **kwargs) + if pretrained: + state_dict = model_zoo.load_url( + model_urls['vit_base_resnet50_224_in21k'], + progress=False, + map_location='cpu') + state_dict = _conv_filter(state_dict) + model = load_state_dict(model, state_dict) + return model diff --git a/Evaluator_272/mld/models/architectures/vposert_vae.py b/Evaluator_272/mld/models/architectures/vposert_vae.py new file mode 100644 index 0000000000000000000000000000000000000000..4941a6a08f3fd763b456b0e6c8b8931a28227086 --- /dev/null +++ b/Evaluator_272/mld/models/architectures/vposert_vae.py @@ -0,0 +1,113 @@ +from functools import reduce +from typing import List, Optional, Union + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor +from torch.distributions.distribution import Distribution + +from mld.models.architectures.tools.embeddings import (TimestepEmbedding, + Timesteps) +from mld.models.operator import PositionalEncoding +from mld.models.operator.cross_attention import ( + SkipTransformerEncoder, SkipTransformerDecoder, TransformerDecoder, + TransformerDecoderLayer, TransformerEncoder, TransformerEncoderLayer) +from mld.models.operator.position_encoding import build_position_encoding +from mld.utils.temos_utils import lengths_to_mask +''' +vae +skip connection encoder +skip connection decoder +mem for each decoder layer +''' + + +class VPosert(nn.Module): + + def __init__(self, cfg, **kwargs) -> None: + + super(VPosert, self).__init__() + + num_neurons = 512 + self.latentD = 256 + + # self.num_joints = 21 + n_features = 196 * 263 + + self.encoder_net = nn.Sequential( + BatchFlatten(), nn.BatchNorm1d(n_features), + nn.Linear(n_features, num_neurons), nn.LeakyReLU(), + nn.BatchNorm1d(num_neurons), nn.Dropout(0.1), + nn.Linear(num_neurons, num_neurons), + nn.Linear(num_neurons, num_neurons), + NormalDistDecoder(num_neurons, self.latentD)) + + self.decoder_net = nn.Sequential( + nn.Linear(self.latentD, num_neurons), + nn.LeakyReLU(), + nn.Dropout(0.1), + nn.Linear(num_neurons, num_neurons), + nn.LeakyReLU(), + nn.Linear(num_neurons, n_features), + ContinousRotReprDecoder(), + ) + + def forward(self, features: Tensor, lengths: Optional[List[int]] = None): + q_z_sample, q_z = self.encode(features) + feats_rst = self.decode(q_z_sample) + return feats_rst, q_z + + def encode(self, pose_body, lengths: Optional[List[int]] = None): + ''' + :param Pin: Nx(numjoints*3) + :param rep_type: 'matrot'/'aa' for matrix rotations or axis-angle + :return: + ''' + q_z = self.encoder_net(pose_body) + q_z_sample = q_z.rsample() + return q_z_sample.unsqueeze(0), q_z + + def decode(self, Zin, lengths: Optional[List[int]] = None): + bs = Zin.shape[0] + Zin = Zin[0] + + prec = self.decoder_net(Zin) + + return prec + + + +class BatchFlatten(nn.Module): + + def __init__(self): + super(BatchFlatten, self).__init__() + self._name = 'batch_flatten' + + def forward(self, x): + return x.view(x.shape[0], -1) + + +class ContinousRotReprDecoder(nn.Module): + + def __init__(self): + super(ContinousRotReprDecoder, self).__init__() + + def forward(self, 
module_input): + reshaped_input = module_input.view(-1, 196, 263) + + return reshaped_input + + +class NormalDistDecoder(nn.Module): + + def __init__(self, num_feat_in, latentD): + super(NormalDistDecoder, self).__init__() + + self.mu = nn.Linear(num_feat_in, latentD) + self.logvar = nn.Linear(num_feat_in, latentD) + + def forward(self, Xout): + return torch.distributions.normal.Normal(self.mu(Xout), + F.softplus(self.logvar(Xout))) diff --git a/Evaluator_272/mld/models/body_skeleton/__init__.py b/Evaluator_272/mld/models/body_skeleton/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Evaluator_272/mld/models/body_skeleton/paramUtil.py b/Evaluator_272/mld/models/body_skeleton/paramUtil.py new file mode 100644 index 0000000000000000000000000000000000000000..c8ea8218a329a828c48a24c8b298ffae84e0e2fe --- /dev/null +++ b/Evaluator_272/mld/models/body_skeleton/paramUtil.py @@ -0,0 +1,98 @@ +import numpy as np + +# Define a kinematic tree for the skeletal structure +kit_kinematic_chain = [[0, 11, 12, 13, 14, 15], [0, 16, 17, 18, 19, 20], [0, 1, 2, 3, 4], [3, 5, 6, 7], [3, 8, 9, 10]] + +kit_raw_offsets = np.array( + [ + [0, 0, 0], + [0, 1, 0], + [0, 1, 0], + [0, 1, 0], + [0, 1, 0], + [1, 0, 0], + [0, -1, 0], + [0, -1, 0], + [-1, 0, 0], + [0, -1, 0], + [0, -1, 0], + [1, 0, 0], + [0, -1, 0], + [0, -1, 0], + [0, 0, 1], + [0, 0, 1], + [-1, 0, 0], + [0, -1, 0], + [0, -1, 0], + [0, 0, 1], + [0, 0, 1] + ] +) + +t2m_raw_offsets = np.array([[0,0,0], + [1,0,0], + [-1,0,0], + [0,1,0], + [0,-1,0], + [0,-1,0], + [0,1,0], + [0,-1,0], + [0,-1,0], + [0,1,0], + [0,0,1], + [0,0,1], + [0,1,0], + [1,0,0], + [-1,0,0], + [0,0,1], + [0,-1,0], + [0,-1,0], + [0,-1,0], + [0,-1,0], + [0,-1,0], + [0,-1,0]]) + +# 30 +t2m_hand_raw_offsets = np.array([[1,0,0], # left_index1 + [1,0,0], # left_index2 + [1,0,0], # left_index3 + [1,0,0], # left_middle1 + [1,0,0], # left_middle2 + [1,0,0], # left_middle3 + [1,0,0], # left_pinky1 + [1,0,0], # left_pinky2 + [1,0,0], # left_pinky3 + [1,0,0], # left_ring1 + [1,0,0], # left_ring2 + [1,0,0], # left_ring3 + [1,0,0], # left_thumb1 + [1,0,0], # left_thumb2 + [1,0,0], # left_thumb3 + [-1,0,0], # right_index1 + [-1,0,0], # right_index2 + [-1,0,0], # right_index3 + [-1,0,0], # right_middle1 + [-1,0,0], # right_middle2 + [-1,0,0], # right_middle3 + [-1,0,0], # right_pinky1 + [-1,0,0], # right_pinky2 + [-1,0,0], # right_pinky3 + [-1,0,0], # right_ring1 + [-1,0,0], # right_ring2 + [-1,0,0], # right_ring3 + [-1,0,0], # right_thumb1 + [-1,0,0], # right_thumb2 + [-1,0,0],]) # right_thumb3 + +t2m_raw_body_hand_offsets = np.concatenate((t2m_raw_offsets, t2m_hand_raw_offsets), axis=0) + +t2m_kinematic_chain = [[0, 2, 5, 8, 11], [0, 1, 4, 7, 10], [0, 3, 6, 9, 12, 15], [9, 14, 17, 19, 21], [9, 13, 16, 18, 20]] +t2m_left_hand_chain = [[20, 22, 23, 24], [20, 34, 35, 36], [20, 25, 26, 27], [20, 31, 32, 33], [20, 28, 29, 30]] +t2m_right_hand_chain = [[21, 43, 44, 45], [21, 46, 47, 48], [21, 40, 41, 42], [21, 37, 38, 39], [21, 49, 50, 51]] + +t2m_body_hand_kinematic_chain = t2m_kinematic_chain + t2m_left_hand_chain + t2m_right_hand_chain + +kit_tgt_skel_id = '03950' + +t2m_tgt_skel_id = '000021' + diff --git a/Evaluator_272/mld/models/body_skeleton/quaternion.py b/Evaluator_272/mld/models/body_skeleton/quaternion.py new file mode 100644 index 0000000000000000000000000000000000000000..dca3d890080a4e91e3f275f442b0aed006562881 --- /dev/null +++ b/Evaluator_272/mld/models/body_skeleton/quaternion.py @@ -0,0 +1,423 @@ +# Copyright 
(c) 2018-present, Facebook, Inc. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# + +import torch +import numpy as np + +_EPS4 = np.finfo(float).eps * 4.0 + +_FLOAT_EPS = np.finfo(np.float64).eps + +# PyTorch-backed implementations +def qinv(q): + assert q.shape[-1] == 4, 'q must be a tensor of shape (*, 4)' + mask = torch.ones_like(q) + mask[..., 1:] = -mask[..., 1:] + return q * mask + + +def qinv_np(q): + assert q.shape[-1] == 4, 'q must be a tensor of shape (*, 4)' + return qinv(torch.from_numpy(q).float()).numpy() + + +def qnormalize(q): + assert q.shape[-1] == 4, 'q must be a tensor of shape (*, 4)' + return q / torch.norm(q, dim=-1, keepdim=True) + + +def qmul(q, r): + """ + Multiply quaternion(s) q with quaternion(s) r. + Expects two equally-sized tensors of shape (*, 4), where * denotes any number of dimensions. + Returns q*r as a tensor of shape (*, 4). + """ + assert q.shape[-1] == 4 + assert r.shape[-1] == 4 + + original_shape = q.shape + + # Compute outer product + terms = torch.bmm(r.view(-1, 4, 1), q.view(-1, 1, 4)) + + w = terms[:, 0, 0] - terms[:, 1, 1] - terms[:, 2, 2] - terms[:, 3, 3] + x = terms[:, 0, 1] + terms[:, 1, 0] - terms[:, 2, 3] + terms[:, 3, 2] + y = terms[:, 0, 2] + terms[:, 1, 3] + terms[:, 2, 0] - terms[:, 3, 1] + z = terms[:, 0, 3] - terms[:, 1, 2] + terms[:, 2, 1] + terms[:, 3, 0] + return torch.stack((w, x, y, z), dim=1).view(original_shape) + + +def qrot(q, v): + """ + Rotate vector(s) v about the rotation described by quaternion(s) q. + Expects a tensor of shape (*, 4) for q and a tensor of shape (*, 3) for v, + where * denotes any number of dimensions. + Returns a tensor of shape (*, 3). + """ + assert q.shape[-1] == 4 + assert v.shape[-1] == 3 + assert q.shape[:-1] == v.shape[:-1] + + original_shape = list(v.shape) + # print(q.shape) + q = q.contiguous().view(-1, 4) + v = v.contiguous().view(-1, 3) + + qvec = q[:, 1:] + uv = torch.cross(qvec, v, dim=1) + uuv = torch.cross(qvec, uv, dim=1) + return (v + 2 * (q[:, :1] * uv + uuv)).view(original_shape) + + +def qeuler(q, order, epsilon=0, deg=True): + """ + Convert quaternion(s) q to Euler angles. + Expects a tensor of shape (*, 4), where * denotes any number of dimensions. + Returns a tensor of shape (*, 3). 
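+ Angles are returned in degrees when deg=True (the default), otherwise in radians; the identity quaternion (1, 0, 0, 0) yields (0, 0, 0) for every order.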
+ """ + assert q.shape[-1] == 4 + + original_shape = list(q.shape) + original_shape[-1] = 3 + q = q.view(-1, 4) + + q0 = q[:, 0] + q1 = q[:, 1] + q2 = q[:, 2] + q3 = q[:, 3] + + if order == 'xyz': + x = torch.atan2(2 * (q0 * q1 - q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2)) + y = torch.asin(torch.clamp(2 * (q1 * q3 + q0 * q2), -1 + epsilon, 1 - epsilon)) + z = torch.atan2(2 * (q0 * q3 - q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3)) + elif order == 'yzx': + x = torch.atan2(2 * (q0 * q1 - q2 * q3), 1 - 2 * (q1 * q1 + q3 * q3)) + y = torch.atan2(2 * (q0 * q2 - q1 * q3), 1 - 2 * (q2 * q2 + q3 * q3)) + z = torch.asin(torch.clamp(2 * (q1 * q2 + q0 * q3), -1 + epsilon, 1 - epsilon)) + elif order == 'zxy': + x = torch.asin(torch.clamp(2 * (q0 * q1 + q2 * q3), -1 + epsilon, 1 - epsilon)) + y = torch.atan2(2 * (q0 * q2 - q1 * q3), 1 - 2 * (q1 * q1 + q2 * q2)) + z = torch.atan2(2 * (q0 * q3 - q1 * q2), 1 - 2 * (q1 * q1 + q3 * q3)) + elif order == 'xzy': + x = torch.atan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q3 * q3)) + y = torch.atan2(2 * (q0 * q2 + q1 * q3), 1 - 2 * (q2 * q2 + q3 * q3)) + z = torch.asin(torch.clamp(2 * (q0 * q3 - q1 * q2), -1 + epsilon, 1 - epsilon)) + elif order == 'yxz': + x = torch.asin(torch.clamp(2 * (q0 * q1 - q2 * q3), -1 + epsilon, 1 - epsilon)) + y = torch.atan2(2 * (q1 * q3 + q0 * q2), 1 - 2 * (q1 * q1 + q2 * q2)) + z = torch.atan2(2 * (q1 * q2 + q0 * q3), 1 - 2 * (q1 * q1 + q3 * q3)) + elif order == 'zyx': + x = torch.atan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2)) + y = torch.asin(torch.clamp(2 * (q0 * q2 - q1 * q3), -1 + epsilon, 1 - epsilon)) + z = torch.atan2(2 * (q0 * q3 + q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3)) + else: + raise + + if deg: + return torch.stack((x, y, z), dim=1).view(original_shape) * 180 / np.pi + else: + return torch.stack((x, y, z), dim=1).view(original_shape) + + +# Numpy-backed implementations + +def qmul_np(q, r): + q = torch.from_numpy(q).contiguous().float() + r = torch.from_numpy(r).contiguous().float() + return qmul(q, r).numpy() + + +def qrot_np(q, v): + q = torch.from_numpy(q).contiguous().float() + v = torch.from_numpy(v).contiguous().float() + return qrot(q, v).numpy() + + +def qeuler_np(q, order, epsilon=0, use_gpu=False): + if use_gpu: + q = torch.from_numpy(q).cuda().float() + return qeuler(q, order, epsilon).cpu().numpy() + else: + q = torch.from_numpy(q).contiguous().float() + return qeuler(q, order, epsilon).numpy() + + +def qfix(q): + """ + Enforce quaternion continuity across the time dimension by selecting + the representation (q or -q) with minimal distance (or, equivalently, maximal dot product) + between two consecutive frames. + + Expects a tensor of shape (L, J, 4), where L is the sequence length and J is the number of joints. + Returns a tensor of the same shape. + """ + assert len(q.shape) == 3 + assert q.shape[-1] == 4 + + result = q.copy() + dot_products = np.sum(q[1:] * q[:-1], axis=2) + mask = dot_products < 0 + mask = (np.cumsum(mask, axis=0) % 2).astype(bool) + result[1:][mask] *= -1 + return result + + +def euler2quat(e, order, deg=True): + """ + Convert Euler angles to quaternions. + """ + assert e.shape[-1] == 3 + + original_shape = list(e.shape) + original_shape[-1] = 4 + + e = e.view(-1, 3) + + ## if euler angles in degrees + if deg: + e = e * np.pi / 180. 
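+    # build one unit quaternion per axis from the half-angles, then compose them with qmul in the given order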
+ + x = e[:, 0] + y = e[:, 1] + z = e[:, 2] + + rx = torch.stack((torch.cos(x / 2), torch.sin(x / 2), torch.zeros_like(x), torch.zeros_like(x)), dim=1) + ry = torch.stack((torch.cos(y / 2), torch.zeros_like(y), torch.sin(y / 2), torch.zeros_like(y)), dim=1) + rz = torch.stack((torch.cos(z / 2), torch.zeros_like(z), torch.zeros_like(z), torch.sin(z / 2)), dim=1) + + result = None + for coord in order: + if coord == 'x': + r = rx + elif coord == 'y': + r = ry + elif coord == 'z': + r = rz + else: + raise + if result is None: + result = r + else: + result = qmul(result, r) + + # Reverse antipodal representation to have a non-negative "w" + if order in ['xyz', 'yzx', 'zxy']: + result *= -1 + + return result.view(original_shape) + + +def expmap_to_quaternion(e): + """ + Convert axis-angle rotations (aka exponential maps) to quaternions. + Stable formula from "Practical Parameterization of Rotations Using the Exponential Map". + Expects a tensor of shape (*, 3), where * denotes any number of dimensions. + Returns a tensor of shape (*, 4). + """ + assert e.shape[-1] == 3 + + original_shape = list(e.shape) + original_shape[-1] = 4 + e = e.reshape(-1, 3) + + theta = np.linalg.norm(e, axis=1).reshape(-1, 1) + w = np.cos(0.5 * theta).reshape(-1, 1) + xyz = 0.5 * np.sinc(0.5 * theta / np.pi) * e + return np.concatenate((w, xyz), axis=1).reshape(original_shape) + + +def euler_to_quaternion(e, order): + """ + Convert Euler angles to quaternions. + """ + assert e.shape[-1] == 3 + + original_shape = list(e.shape) + original_shape[-1] = 4 + + e = e.reshape(-1, 3) + + x = e[:, 0] + y = e[:, 1] + z = e[:, 2] + + rx = np.stack((np.cos(x / 2), np.sin(x / 2), np.zeros_like(x), np.zeros_like(x)), axis=1) + ry = np.stack((np.cos(y / 2), np.zeros_like(y), np.sin(y / 2), np.zeros_like(y)), axis=1) + rz = np.stack((np.cos(z / 2), np.zeros_like(z), np.zeros_like(z), np.sin(z / 2)), axis=1) + + result = None + for coord in order: + if coord == 'x': + r = rx + elif coord == 'y': + r = ry + elif coord == 'z': + r = rz + else: + raise + if result is None: + result = r + else: + result = qmul_np(result, r) + + # Reverse antipodal representation to have a non-negative "w" + if order in ['xyz', 'yzx', 'zxy']: + result *= -1 + + return result.reshape(original_shape) + + +def quaternion_to_matrix(quaternions): + """ + Convert rotations given as quaternions to rotation matrices. + Args: + quaternions: quaternions with real part first, + as tensor of shape (..., 4). + Returns: + Rotation matrices as tensor of shape (..., 3, 3). 
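+ The identity quaternion (1, 0, 0, 0) maps to the 3x3 identity matrix; non-unit quaternions are handled by the 2 / |q|^2 factor (two_s below), which makes the output invariant to quaternion scale.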
+ """ + r, i, j, k = torch.unbind(quaternions, -1) + two_s = 2.0 / (quaternions * quaternions).sum(-1) + + o = torch.stack( + ( + 1 - two_s * (j * j + k * k), + two_s * (i * j - k * r), + two_s * (i * k + j * r), + two_s * (i * j + k * r), + 1 - two_s * (i * i + k * k), + two_s * (j * k - i * r), + two_s * (i * k - j * r), + two_s * (j * k + i * r), + 1 - two_s * (i * i + j * j), + ), + -1, + ) + return o.reshape(quaternions.shape[:-1] + (3, 3)) + + +def quaternion_to_matrix_np(quaternions): + q = torch.from_numpy(quaternions).contiguous().float() + return quaternion_to_matrix(q).numpy() + + +def quaternion_to_cont6d_np(quaternions): + rotation_mat = quaternion_to_matrix_np(quaternions) + cont_6d = np.concatenate([rotation_mat[..., 0], rotation_mat[..., 1]], axis=-1) + return cont_6d + + +def quaternion_to_cont6d(quaternions): + rotation_mat = quaternion_to_matrix(quaternions) + cont_6d = torch.cat([rotation_mat[..., 0], rotation_mat[..., 1]], dim=-1) + return cont_6d + + +def cont6d_to_matrix(cont6d): + assert cont6d.shape[-1] == 6, "The last dimension must be 6" + x_raw = cont6d[..., 0:3] + y_raw = cont6d[..., 3:6] + + x = x_raw / torch.norm(x_raw, dim=-1, keepdim=True) + z = torch.cross(x, y_raw, dim=-1) + z = z / torch.norm(z, dim=-1, keepdim=True) + + y = torch.cross(z, x, dim=-1) + + x = x[..., None] + y = y[..., None] + z = z[..., None] + + mat = torch.cat([x, y, z], dim=-1) + return mat + + +def cont6d_to_matrix_np(cont6d): + q = torch.from_numpy(cont6d).contiguous().float() + return cont6d_to_matrix(q).numpy() + + +def qpow(q0, t, dtype=torch.float): + ''' q0 : tensor of quaternions + t: tensor of powers + ''' + q0 = qnormalize(q0) + theta0 = torch.acos(q0[..., 0]) + + ## if theta0 is close to zero, add epsilon to avoid NaNs + mask = (theta0 <= 10e-10) * (theta0 >= -10e-10) + theta0 = (1 - mask) * theta0 + mask * 10e-10 + v0 = q0[..., 1:] / torch.sin(theta0).view(-1, 1) + + if isinstance(t, torch.Tensor): + q = torch.zeros(t.shape + q0.shape) + theta = t.view(-1, 1) * theta0.view(1, -1) + else: ## if t is a number + q = torch.zeros(q0.shape) + theta = t * theta0 + + q[..., 0] = torch.cos(theta) + q[..., 1:] = v0 * torch.sin(theta).unsqueeze(-1) + + return q.to(dtype) + + +def qslerp(q0, q1, t): + ''' + q0: starting quaternion + q1: ending quaternion + t: array of points along the way + + Returns: + Tensor of Slerps: t.shape + q0.shape + ''' + + q0 = qnormalize(q0) + q1 = qnormalize(q1) + q_ = qpow(qmul(q1, qinv(q0)), t) + + return qmul(q_, + q0.contiguous().view(torch.Size([1] * len(t.shape)) + q0.shape).expand(t.shape + q0.shape).contiguous()) + + +def qbetween(v0, v1): + ''' + find the quaternion used to rotate v0 to v1 + ''' + assert v0.shape[-1] == 3, 'v0 must be of the shape (*, 3)' + assert v1.shape[-1] == 3, 'v1 must be of the shape (*, 3)' + + v = torch.cross(v0, v1) + w = torch.sqrt((v0 ** 2).sum(dim=-1, keepdim=True) * (v1 ** 2).sum(dim=-1, keepdim=True)) + (v0 * v1).sum(dim=-1, + keepdim=True) + return qnormalize(torch.cat([w, v], dim=-1)) + + +def qbetween_np(v0, v1): + ''' + find the quaternion used to rotate v0 to v1 + ''' + assert v0.shape[-1] == 3, 'v0 must be of the shape (*, 3)' + assert v1.shape[-1] == 3, 'v1 must be of the shape (*, 3)' + + v0 = torch.from_numpy(v0).float() + v1 = torch.from_numpy(v1).float() + return qbetween(v0, v1).numpy() + + +def lerp(p0, p1, t): + if not isinstance(t, torch.Tensor): + t = torch.Tensor([t]) + + new_shape = t.shape + p0.shape + new_view_t = t.shape + torch.Size([1] * len(p0.shape)) + new_view_p = torch.Size([1] * 
len(t.shape)) + p0.shape + p0 = p0.view(new_view_p).expand(new_shape) + p1 = p1.view(new_view_p).expand(new_shape) + t = t.view(new_view_t).expand(new_shape) + + return p0 + t * (p1 - p0) diff --git a/Evaluator_272/mld/models/body_skeleton/skeleton.py b/Evaluator_272/mld/models/body_skeleton/skeleton.py new file mode 100644 index 0000000000000000000000000000000000000000..ac25a5773a04202c2059086dbf2e0d634fdf0290 --- /dev/null +++ b/Evaluator_272/mld/models/body_skeleton/skeleton.py @@ -0,0 +1,271 @@ +from .quaternion import * +import scipy.ndimage.filters as filters + +class Skeleton(object): + def __init__(self, offset, kinematic_tree): + self._raw_offset_np = offset.numpy() + self._raw_offset = offset.clone().detach().float() + self._kinematic_tree = kinematic_tree + self._offset = None + self._parents = [0] * len(self._raw_offset) + self._parents[0] = -1 + for chain in self._kinematic_tree: + for j in range(1, len(chain)): + self._parents[chain[j]] = chain[j-1] + + def njoints(self): + return len(self._raw_offset) + + def offset(self): + return self._offset + + def set_offset(self, offsets): + self._offset = offsets.clone().detach().float() + + def kinematic_tree(self): + return self._kinematic_tree + + def parents(self): + return self._parents + + # joints (batch_size, joints_num, 3) + def get_offsets_joints_batch(self, joints): + assert len(joints.shape) == 3 + _offsets = self._raw_offset.expand(joints.shape[0], -1, -1).clone() + for i in range(1, self._raw_offset.shape[0]): + _offsets[:, i] = torch.norm(joints[:, i] - joints[:, self._parents[i]], p=2, dim=1)[:, None] * _offsets[:, i] + + self._offset = _offsets.detach() + return _offsets + + # joints (joints_num, 3) + def get_offsets_joints(self, joints): + assert len(joints.shape) == 2 + _offsets = self._raw_offset.clone() + for i in range(1, self._raw_offset.shape[0]): + # print(joints.shape) + _offsets[i] = torch.norm(joints[i] - joints[self._parents[i]], p=2, dim=0) * _offsets[i] + + self._offset = _offsets.detach() + return _offsets + + # face_joint_idx should follow the order of right hip, left hip, right shoulder, left shoulder + # joints (batch_size, joints_num, 3) + def inverse_kinematics_np(self, joints, face_joint_idx, smooth_forward=False): + assert len(face_joint_idx) == 4 + '''Get Forward Direction''' + l_hip, r_hip, sdr_r, sdr_l = face_joint_idx + across1 = joints[:, r_hip] - joints[:, l_hip] + across2 = joints[:, sdr_r] - joints[:, sdr_l] + across = across1 + across2 + across = across / np.sqrt((across**2).sum(axis=-1))[:, np.newaxis] + # print(across1.shape, across2.shape) + + # forward (batch_size, 3) + forward = np.cross(np.array([[0, 1, 0]]), across, axis=-1) + if smooth_forward: + forward = filters.gaussian_filter1d(forward, 20, axis=0, mode='nearest') + # forward (batch_size, 3) + forward = forward / np.sqrt((forward**2).sum(axis=-1))[..., np.newaxis] + + '''Get Root Rotation''' + target = np.array([[0,0,1]]).repeat(len(forward), axis=0) + root_quat = qbetween_np(forward, target) + + '''Inverse Kinematics''' + # quat_params (batch_size, joints_num, 4) + # print(joints.shape[:-1]) + quat_params = np.zeros(joints.shape[:-1] + (4,)) + # print(quat_params.shape) + root_quat[0] = np.array([[1.0, 0.0, 0.0, 0.0]]) + quat_params[:, 0] = root_quat + # quat_params[0, 0] = np.array([[1.0, 0.0, 0.0, 0.0]]) + for chain in self._kinematic_tree: + R = root_quat + for j in range(len(chain) - 1): + # (batch, 3) + u = self._raw_offset_np[chain[j+1]][np.newaxis,...].repeat(len(joints), axis=0) + # print(u.shape) + # (batch, 3) + v 
= joints[:, chain[j+1]] - joints[:, chain[j]] + v = v / np.sqrt((v**2).sum(axis=-1))[:, np.newaxis] + # print(u.shape, v.shape) + rot_u_v = qbetween_np(u, v) + + R_loc = qmul_np(qinv_np(R), rot_u_v) + + quat_params[:,chain[j + 1], :] = R_loc + R = qmul_np(R, R_loc) + + return quat_params + + # joints (batch_size, frames, joints_num, 3) + def inverse_kinematics(self, joints, face_joint_idx, smooth_forward=False): + + bs = joints.shape[0] + frame = joints.shape[1] + joint_num = joints.shape[2] + + + joints = joints.reshape(-1, joints.shape[-2], joints.shape[-1]) + assert len(face_joint_idx) == 4 + '''Get Forward Direction''' + l_hip, r_hip, sdr_r, sdr_l = face_joint_idx + across1 = joints[:,r_hip] - joints[:, l_hip] + across2 = joints[:,sdr_r] - joints[:, sdr_l] + across = across1 + across2 + + # across = across / np.sqrt((across**2).sum(axis=-1))[:, np.newaxis] + # across = data / np.sqrt((data**2).sum(axis=-1))[:, np.newaxis] + across = across / torch.sqrt((across**2).sum(dim=-1)).unsqueeze(1) + + + # print(across1.shape, across2.shape) + + # forward (batch_size, 3) + # forward = np.cross(np.array([[0, 1, 0]]), across, axis=-1) + forward = torch.cross(torch.tensor([[0, 1, 0]], dtype=torch.float32).to(joints.device), across, dim=-1) + + if smooth_forward: + forward = filters.gaussian_filter1d(forward, 20, axis=0, mode='nearest') + # forward (batch_size, 3) + # forward = forward / np.sqrt((forward**2).sum(axis=-1))[..., np.newaxis] + # forward = torch.norm(forward, p=2, dim=-1).unsqueeze(-1) + forward = forward / torch.sqrt((forward**2).sum(dim=-1)).unsqueeze(-1) + + '''Get Root Rotation''' + # target = np.array([[0,0,1]]).repeat(len(forward), axis=0) + target = torch.tensor([[0, 0, 1]], dtype=torch.float32).expand(len(forward), -1).to(joints.device) + root_quat = qbetween(forward, target) + + '''Inverse Kinematics''' + # quat_params (batch_size, joints_num, 4) + # print(joints.shape[:-1]) + # quat_params = np.zeros(joints.shape[:-1] + (4,)) + quat_params = torch.zeros(joints.shape[:-1] + (4,)).to(joints.device) + # print(quat_params.shape) + root_quat[0] = torch.tensor([[1.0, 0.0, 0.0, 0.0]]) + quat_params[:, 0] = root_quat + # quat_params[0, 0] = np.array([[1.0, 0.0, 0.0, 0.0]]) + + for chain in self._kinematic_tree: + R = root_quat + for j in range(len(chain) - 1): + # (batch, 3) + u = torch.from_numpy(self._raw_offset_np[chain[j+1]][np.newaxis,...].repeat(len(joints), axis=0)).to(joints.device) + # print(u.shape) + # (batch, 3) + v = joints[:, chain[j+1]] - joints[:, chain[j]] + # v = v / np.sqrt((v**2).sum(axis=-1))[:, np.newaxis] + v = v / torch.sqrt((v**2).sum(dim=-1)).unsqueeze(1) + # v = torch.norm(v, p =2, dim=-1).unsqueeze(1) + # print(u.shape, v.shape) + rot_u_v = qbetween(u.float(), v) + + R_loc = qmul(qinv(R), rot_u_v) + + quat_params[:,chain[j + 1], :] = R_loc + R = qmul(R, R_loc) + + quat_params = quat_params.reshape(bs, frame, joint_num,4) + + return quat_params + + # Be sure root joint is at the beginning of kinematic chains + def forward_kinematics(self, quat_params, root_pos, skel_joints=None, do_root_R=True): + # quat_params (batch_size, joints_num, 4) + # joints (batch_size, joints_num, 3) + # root_pos (batch_size, 3) + if skel_joints is not None: + offsets = self.get_offsets_joints_batch(skel_joints) + if len(self._offset.shape) == 2: + offsets = self._offset.expand(quat_params.shape[0], -1, -1) + joints = torch.zeros(quat_params.shape[:-1] + (3,)) + joints[:, 0] = root_pos + for chain in self._kinematic_tree: + if do_root_R: + R = quat_params[:, 0] + else: + R = 
torch.tensor([[1.0, 0.0, 0.0, 0.0]]).expand(len(quat_params), -1).detach() + for i in range(1, len(chain)): + R = qmul(R, quat_params[:, chain[i]]) + offset_vec = offsets[:, chain[i]] + joints[:, chain[i]] = qrot(R, offset_vec) + joints[:, chain[i-1]] + return joints + + # Be sure root joint is at the beginning of kinematic chains + def forward_kinematics_np(self, quat_params, root_pos, skel_joints=None, do_root_R=True): + # quat_params (batch_size, joints_num, 4) + # joints (batch_size, joints_num, 3) + # root_pos (batch_size, 3) + if skel_joints is not None: + skel_joints = torch.from_numpy(skel_joints) + offsets = self.get_offsets_joints_batch(skel_joints) + if len(self._offset.shape) == 2: + offsets = self._offset.expand(quat_params.shape[0], -1, -1) + offsets = offsets.numpy() + joints = np.zeros(quat_params.shape[:-1] + (3,)) + joints[:, 0] = root_pos + for chain in self._kinematic_tree: + if do_root_R: + R = quat_params[:, 0] + else: + R = np.array([[1.0, 0.0, 0.0, 0.0]]).repeat(len(quat_params), axis=0) + for i in range(1, len(chain)): + R = qmul_np(R, quat_params[:, chain[i]]) + offset_vec = offsets[:, chain[i]] + joints[:, chain[i]] = qrot_np(R, offset_vec) + joints[:, chain[i - 1]] + return joints + + def forward_kinematics_cont6d_np(self, cont6d_params, root_pos, skel_joints=None, do_root_R=True): + # cont6d_params (batch_size, joints_num, 6) + # joints (batch_size, joints_num, 3) + # root_pos (batch_size, 3) + if skel_joints is not None: + skel_joints = torch.from_numpy(skel_joints) + offsets = self.get_offsets_joints_batch(skel_joints) + if len(self._offset.shape) == 2: + offsets = self._offset.expand(cont6d_params.shape[0], -1, -1) + offsets = offsets.numpy() + joints = np.zeros(cont6d_params.shape[:-1] + (3,)) + joints[:, 0] = root_pos + for chain in self._kinematic_tree: + if do_root_R: + matR = cont6d_to_matrix_np(cont6d_params[:, 0]) + else: + matR = np.eye(3)[np.newaxis, :].repeat(len(cont6d_params), axis=0) + for i in range(1, len(chain)): + matR = np.matmul(matR, cont6d_to_matrix_np(cont6d_params[:, chain[i]])) + offset_vec = offsets[:, chain[i]][..., np.newaxis] + # print(matR.shape, offset_vec.shape) + joints[:, chain[i]] = np.matmul(matR, offset_vec).squeeze(-1) + joints[:, chain[i-1]] + return joints + + def forward_kinematics_cont6d(self, cont6d_params, root_pos, skel_joints=None, do_root_R=True): + # cont6d_params (batch_size, joints_num, 6) + # joints (batch_size, joints_num, 3) + # root_pos (batch_size, 3) + + if skel_joints is not None: + # skel_joints = torch.from_numpy(skel_joints) + offsets = self.get_offsets_joints_batch(skel_joints) + if len(self._offset.shape) == 2: + offsets = self._offset.expand(cont6d_params.shape[0], -1, -1) + joints = torch.zeros(cont6d_params.shape[:-1] + (3,)).to(cont6d_params.device) + joints[..., 0, :] = root_pos + for chain in self._kinematic_tree: + if do_root_R: + matR = cont6d_to_matrix(cont6d_params[:, 0]) + else: + matR = torch.eye(3).expand((len(cont6d_params), -1, -1)).detach() + for i in range(1, len(chain)): + matR = torch.matmul(matR, cont6d_to_matrix(cont6d_params[:, chain[i]])) + offset_vec = offsets[:, chain[i]].unsqueeze(-1).to(matR.device) + # print(matR.shape, offset_vec.shape) + joints[:, chain[i]] = torch.matmul(matR, offset_vec).squeeze(-1) + joints[:, chain[i-1]] + return joints + + + + + diff --git a/Evaluator_272/mld/models/get_model.py b/Evaluator_272/mld/models/get_model.py new file mode 100644 index 0000000000000000000000000000000000000000..ece21898323c317b77698ad1481bfcfb5aaa2640 --- /dev/null +++ 
b/Evaluator_272/mld/models/get_model.py @@ -0,0 +1,17 @@ +import importlib + + +def get_model(cfg, datamodule, phase="train"): + modeltype = cfg.model.model_type + if modeltype in ["mld", "temos", "gpt"]: + return get_module(cfg, datamodule) + else: + raise ValueError(f"Invalid model type {modeltype}.") + + +def get_module(cfg, datamodule): + modeltype = cfg.model.model_type + model_module = importlib.import_module( + f".modeltype.{cfg.model.model_type}", package="mld.models") + Model = model_module.__getattribute__(f"{modeltype.upper()}") + return Model(cfg=cfg, datamodule=datamodule) diff --git a/Evaluator_272/mld/models/losses/__init__.py b/Evaluator_272/mld/models/losses/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ea10916883c979d359245cd09fb688035aedc7ea --- /dev/null +++ b/Evaluator_272/mld/models/losses/__init__.py @@ -0,0 +1,2 @@ +from mld.models.losses.temos import TemosLosses +from mld.models.losses.tmost import TmostLosses diff --git a/Evaluator_272/mld/models/losses/actor.py b/Evaluator_272/mld/models/losses/actor.py new file mode 100644 index 0000000000000000000000000000000000000000..49be7134091b236eb014ee6368371b565ecf613e --- /dev/null +++ b/Evaluator_272/mld/models/losses/actor.py @@ -0,0 +1,118 @@ +import torch +import torch.nn as nn +from torchmetrics import Metric + +class ACTORLosses(Metric): + """ + Loss + Modify loss + + """ + def __init__(self, vae, mode, cfg): + super().__init__(dist_sync_on_step=cfg.LOSS.DIST_SYNC_ON_STEP) + + # Save parameters + self.vae = vae + self.mode = mode + + losses = [] + losses.append("recons_feature") + losses.append("recons_verts") + losses.append("recons_joints") + losses.append("recons_limb") + + # latent loss + losses.append("latent_st2sm") + + # KL loss + losses.append("kl_motion") + losses.append("total") + + for loss in losses: + self.register_buffer(loss, torch.tensor(0.0)) + self.register_buffer("count", torch.tensor(0)) + self.losses = losses + + self._losses_func = {} + self._params = {} + for loss in losses: + if loss !='total': + if loss.split('_')[0] == 'kl': + self._losses_func[loss] = KLLoss() + self._params[loss] = cfg.LOSS.LAMBDA_KL + elif loss.split('_')[0] == 'recons': + self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean') + self._params[loss] = cfg.LOSS.LAMBDA_REC + elif loss.split('_')[0] == 'cross': + self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean') + self._params[loss] = cfg.LOSS.LAMBDA_CROSS + elif loss.split('_')[0] =='latent': + self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean') + self._params[loss] = cfg.LOSS.LAMBDA_LATENT + elif loss.split('_')[0] =='cycle': + self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean') + self._params[loss] = cfg.LOSS.LAMBDA_CYCLE + else: + ValueError("This loss is not recognized.") + + + def update(self, rs_set, dist_ref): + total: float = 0.0 + # Compute the losses + # loss1 - reconstruction loss + total += self._update_loss("recons_feature", rs_set['m_rst'], rs_set['m_ref']) + # total += self._update_loss("recons_verts", rs_set['verts_rs'], rs_set['verts_ref']) + # total += self._update_loss("recons_joints", rs_set['joints_rs'], rs_set['joints_ref']) + # total += self._update_loss("recons_limb", rs_set['rs_base'], rs_set['m1']) + + # loss - text motion latent loss + total += self._update_loss("kl_motion", rs_set['dist_m'], dist_ref) + + self.total += total.detach() + self.count += 1 + + return total + + def compute(self, split): + count = getattr(self, "count") + return {loss: 
getattr(self, loss)/count for loss in self.losses} + + def _update_loss(self, loss: str, outputs, inputs): + # Update the loss + val = self._losses_func[loss](outputs, inputs) + getattr(self, loss).__iadd__(val.detach()) + # Return a weighted sum + weighted_loss = self._params[loss] * val + return weighted_loss + + def loss2logname(self, loss: str, split: str): + if loss == "total": + log_name = f"{loss}/{split}" + else: + loss_type, name = loss.split("_") + log_name = f"{loss_type}/{name}/{split}" + return log_name + + +class KLLoss: + def __init__(self): + pass + + def __call__(self, q, p): + div = torch.distributions.kl_divergence(q, p) + return div.mean() + + def __repr__(self): + return "KLLoss()" + + +class KLLossMulti: + def __init__(self): + self.klloss = KLLoss() + + def __call__(self, qlist, plist): + return sum([self.klloss(q, p) + for q, p in zip(qlist, plist)]) + + def __repr__(self): + return "KLLossMulti()" diff --git a/Evaluator_272/mld/models/losses/gpt.py b/Evaluator_272/mld/models/losses/gpt.py new file mode 100644 index 0000000000000000000000000000000000000000..50143562a7c9de4c34788b763d180eb77c3e7706 --- /dev/null +++ b/Evaluator_272/mld/models/losses/gpt.py @@ -0,0 +1,166 @@ +import numpy as np +import torch +import torch.nn as nn +from torchmetrics import Metric + +from mld.data.humanml.scripts.motion_process import (qrot, + recover_root_rot_pos) + +from .infonce import InfoNCE + + +class GPTLosses(Metric): + """ + MLD Loss + """ + + def __init__(self, cfg): + super().__init__(dist_sync_on_step=cfg.LOSS.DIST_SYNC_ON_STEP) + + # Save parameters + # self.vae = vae + # self.vae_type = cfg.model.vae_type + # self.mode = mode + self.cfg = cfg + # self.predict_epsilon = cfg.TRAIN.ABLATION.PREDICT_EPSILON + self.stage = cfg.TRAIN.STAGE + + assert self.stage in ["gpt"] + losses = [] + + + + losses.append("ce_motiontoken") + if self.cfg.TRAIN.use_tmr_supervision: + losses.append("contrastive_tmrsupervise") + self.infonce_temp = cfg.LOSS.INFONCE_TEMP + + # self.add_state("count", torch.tensor(0), dist_reduce_fx="mean") + + losses.append("total") + + for loss in losses: + self.add_state(loss, + default=torch.tensor(0.0), + dist_reduce_fx="sum") + # self.register_buffer(loss, torch.tensor(0.0)) + self.add_state("count", torch.tensor(0), dist_reduce_fx="sum") + + if self.stage in ['gpt']: + self.add_state("rightnum", + default=torch.tensor(0.0), + dist_reduce_fx="sum") + self.add_state("count_all_token", + default=torch.tensor(0.0), + dist_reduce_fx="sum") + + self.losses = losses + + + self._losses_func = {} + self._params = {} + for loss in losses: + if loss.split('_')[0] == 'inst': + self._losses_func[loss] = nn.MSELoss(reduction='mean') + self._params[loss] = 1 + elif loss.split('_')[0] == 'x': + self._losses_func[loss] = nn.MSELoss(reduction='mean') + self._params[loss] = 1 + elif loss.split('_')[0] == 'prior': + self._losses_func[loss] = nn.MSELoss(reduction='mean') + self._params[loss] = cfg.LOSS.LAMBDA_PRIOR + elif loss.split('_')[0] == 'kl': + if cfg.LOSS.LAMBDA_KL != 0.0: + self._losses_func[loss] = KLLoss() + self._params[loss] = cfg.LOSS.LAMBDA_KL + elif loss.split('_')[0] == 'recons': + self._losses_func[loss] = torch.nn.SmoothL1Loss( + reduction='mean') + self._params[loss] = cfg.LOSS.LAMBDA_REC + elif loss.split('_')[0] == 'gen': + self._losses_func[loss] = torch.nn.SmoothL1Loss( + reduction='mean') + self._params[loss] = cfg.LOSS.LAMBDA_GEN + elif loss.split('_')[0] == 'latent': + self._losses_func[loss] = torch.nn.SmoothL1Loss( + reduction='mean') + 
self._params[loss] = cfg.LOSS.LAMBDA_LATENT + elif loss.split('_')[0] == 'ce': + self._losses_func[loss] = torch.nn.CrossEntropyLoss( + reduction='mean') + self._params[loss] = 1 + elif loss.split('_')[0] == 'contrastive': + self._losses_func[loss] = InfoNCE(self.infonce_temp) + self._params[loss] = cfg.LOSS.LAMBDA_INFONCE + else: + ValueError("This loss is not recognized.") + + def update(self, rs_set): + total: float = 0.0 + + assert len(rs_set['m_rst']) == len(rs_set['m_ref']) + bs = len(rs_set['m_rst']) + + if self.stage in ['gpt']: + + if self.cfg.TRAIN.use_tmr_supervision: + total += self._update_loss("contrastive_tmrsupervise", (rs_set['supervise_motion_feat'], rs_set['supervise_text_feat']), rs_set['emb_dist']) + + for i in range(bs): + total += self._update_loss("ce_motiontoken", rs_set['m_rst'][i], rs_set['m_ref'][i]) / bs # rs_set['m_rst'][i] (16, 513) rs_set['m_ref'][i] (16) + probs = torch.softmax(rs_set['m_rst'][i], dim=-1) + _, cls_pred_index = torch.max(probs, dim=-1) # 16 + self.count_all_token += cls_pred_index.shape[0] + self.rightnum += (cls_pred_index.flatten(0) == rs_set['m_ref'][i].flatten(0)).sum().item() + + self.total += total.detach() + self.count += 1 + + return total + + def compute(self, split): + count = getattr(self, "count") + loss_dict = {loss: getattr(self, loss) / count for loss in self.losses} + loss_dict['ACC_token'] = self.rightnum / self.count_all_token + return loss_dict + + def _update_loss(self, loss: str, outputs, inputs): + # Update the loss + val = self._losses_func[loss](outputs, inputs) + getattr(self, loss).__iadd__(val.detach()) + # Return a weighted sum + weighted_loss = self._params[loss] * val + return weighted_loss + + def loss2logname(self, loss: str, split: str): + if loss == "total": + log_name = f"{loss}/{split}" + else: + loss_type, name = loss.split("_") + log_name = f"{loss_type}/{name}/{split}" + return log_name + + +class KLLoss: + + def __init__(self): + pass + + def __call__(self, q, p): + div = torch.distributions.kl_divergence(q, p) + return div.mean() + + def __repr__(self): + return "KLLoss()" + + +class KLLossMulti: + + def __init__(self): + self.klloss = KLLoss() + + def __call__(self, qlist, plist): + return sum([self.klloss(q, p) for q, p in zip(qlist, plist)]) + + def __repr__(self): + return "KLLossMulti()" diff --git a/Evaluator_272/mld/models/losses/infonce.py b/Evaluator_272/mld/models/losses/infonce.py new file mode 100644 index 0000000000000000000000000000000000000000..c0065c00c7b8fb4f57d48f8777e4094760725917 --- /dev/null +++ b/Evaluator_272/mld/models/losses/infonce.py @@ -0,0 +1,45 @@ +import torch +import torch.nn.functional as F +import numpy as np + +class InfoNCE: + def __init__(self, t): + # pass + self.t = t + + def __call__(self, f, dist): + ''' + f_motion: N x d + f_text: N x d + ''' + t = self.t + f_motion, f_text = f[0], f[1] + N, d = f_motion.shape[0], f_motion.shape[1] + + + Emb_motion = F.normalize(f_motion, dim=1) + Emb_text = F.normalize(f_text, dim=1) + + t = torch.tensor(t).to(f_motion.device) + logits = torch.mm(Emb_motion, Emb_text.T) + # logits = torch.mm(Emb_motion, Emb_text.T) / torch.exp(t) + if dist is not None: + text_logits = dist.detach() + mask = torch.where(torch.logical_and(text_logits > 0.85, text_logits < 1.0-1e-100), torch.tensor(float('-inf')).to(f_motion.device), torch.tensor(1.0e100).to(f_motion.device)) + mask.diagonal().fill_(float('inf')) + logits = torch.min(mask, logits) + # mask = torch.where((torch.logical_and(text_logits > 0.985, text_logits < 1.0-1e-100)), 
torch.tensor(float('-inf')).cuda(), torch.tensor(1.0e100).cuda()) + # logits = torch.min(mask, logits) + + N = f_motion.shape[0] + labels = torch.arange(N).to(f_motion.device) + + loss_m = F.cross_entropy(logits / t, labels) + loss_t = F.cross_entropy(logits.T / t, labels) + + loss = (loss_m + loss_t) / 2 + + return loss + + def __repr__(self): + return "InfoNCE()" \ No newline at end of file diff --git a/Evaluator_272/mld/models/losses/kl.py b/Evaluator_272/mld/models/losses/kl.py new file mode 100644 index 0000000000000000000000000000000000000000..6532c0e70e037b4e3bf4c57fb25f21a3bbe5c4a6 --- /dev/null +++ b/Evaluator_272/mld/models/losses/kl.py @@ -0,0 +1,23 @@ +import torch + +class KLLoss: + def __init__(self): + pass + + def __call__(self, q, p): + div = torch.distributions.kl_divergence(q, p) + return div.mean() + + def __repr__(self): + return "KLLoss()" + +class KLLossMulti: + def __init__(self): + self.klloss = KLLoss() + + def __call__(self, qlist, plist): + return sum([self.klloss(q, p) + for q, p in zip(qlist, plist)]) + + def __repr__(self): + return "KLLossMulti()" diff --git a/Evaluator_272/mld/models/losses/mld.py b/Evaluator_272/mld/models/losses/mld.py new file mode 100644 index 0000000000000000000000000000000000000000..1316ce1e9930d82be8c805797e803afd1e30c0ec --- /dev/null +++ b/Evaluator_272/mld/models/losses/mld.py @@ -0,0 +1,340 @@ +import numpy as np +import torch +import torch.nn as nn +from torchmetrics import Metric + +from mld.data.humanml.scripts.motion_process import (qrot, + recover_root_rot_pos) + + +class MLDLosses(Metric): + """ + MLD Loss + """ + + def __init__(self, vae, mode, cfg): + super().__init__(dist_sync_on_step=cfg.LOSS.DIST_SYNC_ON_STEP) + + # Save parameters + # self.vae = vae + self.vae_type = cfg.model.vae_type + self.mode = mode + self.cfg = cfg + self.predict_epsilon = cfg.TRAIN.ABLATION.PREDICT_EPSILON + self.stage = cfg.TRAIN.STAGE + + losses = [] + + # diffusion loss + if self.stage in ['diffusion', 'vae_diffusion']: + # instance noise loss + losses.append("inst_loss") + losses.append("x_loss") + if self.cfg.LOSS.LAMBDA_PRIOR != 0.0: + # prior noise loss + losses.append("prior_loss") + + if self.stage in ['vae', 'vae_diffusion']: + # reconstruction loss + losses.append("recons_feature") + losses.append("recons_verts") + losses.append("recons_joints") + losses.append("recons_limb") + + losses.append("gen_feature") + losses.append("gen_joints") + + # KL loss + if self.vae_type in ['mld_dual_vae']: + losses.append("kl_motionbody") + losses.append("kl_motionhand") + else: + losses.append("kl_motion") + + # vel Loss + if cfg.LOSS.Velocity_loss: + losses.append("recons_velocity") + + if self.stage not in ['vae', 'diffusion', 'vae_diffusion']: + raise ValueError(f"Stage {self.stage} not supported") + + losses.append("total") + + for loss in losses: + self.add_state(loss, + default=torch.tensor(0.0), + dist_reduce_fx="sum") + # self.register_buffer(loss, torch.tensor(0.0)) + self.add_state("count", torch.tensor(0), dist_reduce_fx="sum") + self.losses = losses + + self._losses_func = {} + self._params = {} + for loss in losses: + if loss.split('_')[0] == 'inst': + self._losses_func[loss] = nn.MSELoss(reduction='mean') + self._params[loss] = 1 + elif loss.split('_')[0] == 'x': + self._losses_func[loss] = nn.MSELoss(reduction='mean') + self._params[loss] = 1 + elif loss.split('_')[0] == 'prior': + self._losses_func[loss] = nn.MSELoss(reduction='mean') + self._params[loss] = cfg.LOSS.LAMBDA_PRIOR + if loss.split('_')[0] == 'kl': + if 
cfg.LOSS.LAMBDA_KL != 0.0: + self._losses_func[loss] = KLLoss() + self._params[loss] = cfg.LOSS.LAMBDA_KL + elif loss.split('_')[0] == 'recons': + self._losses_func[loss] = torch.nn.SmoothL1Loss( + reduction='mean') + self._params[loss] = cfg.LOSS.LAMBDA_REC + elif loss.split('_')[0] == 'gen': + self._losses_func[loss] = torch.nn.SmoothL1Loss( + reduction='mean') + self._params[loss] = cfg.LOSS.LAMBDA_GEN + elif loss.split('_')[0] == 'latent': + self._losses_func[loss] = torch.nn.SmoothL1Loss( + reduction='mean') + self._params[loss] = cfg.LOSS.LAMBDA_LATENT + + else: + ValueError("This loss is not recognized.") + if loss.split('_')[-1] == 'joints': + self._params[loss] = cfg.LOSS.LAMBDA_JOINT + + def update(self, rs_set): + total: float = 0.0 + # Compute the losses + # Compute instance loss + if self.stage in ["vae", "vae_diffusion"]: + total += self._update_loss("recons_feature", rs_set['m_rst'], + rs_set['m_ref']) + total += self._update_loss("recons_joints", rs_set['joints_rst'], + rs_set['joints_ref']) + if self.vae_type in ["mld_dual_vae"]: + total += self._update_loss("kl_motionbody", rs_set['body_dist_m'], rs_set['body_dist_ref']) + total += self._update_loss("kl_motionhand", rs_set['hand_dist_m'], rs_set['hand_dist_ref']) + else: + total += self._update_loss("kl_motion", rs_set['dist_m'], rs_set['dist_ref']) + + if self.cfg.LOSS.Velocity_loss: + total += self._update_loss("recons_velocity", rs_set['vel_rst'], rs_set['vel_ref']) + + if self.stage in ["diffusion", "vae_diffusion"]: + # predict noise + if self.predict_epsilon: + total += self._update_loss("inst_loss", rs_set['noise_pred'], + rs_set['noise']) + # predict x + else: + total += self._update_loss("x_loss", rs_set['pred'], + rs_set['latent']) + + if self.cfg.LOSS.LAMBDA_PRIOR != 0.0: + # loss - prior loss + total += self._update_loss("prior_loss", rs_set['noise_prior'], + rs_set['dist_m1']) + + if self.stage in ["vae_diffusion"]: + # loss + # noise+text_emb => diff_reverse => latent => decode => motion + total += self._update_loss("gen_feature", rs_set['gen_m_rst'], + rs_set['m_ref']) + total += self._update_loss("gen_joints", rs_set['gen_joints_rst'], + rs_set['joints_ref']) + + self.total += total.detach() + self.count += 1 + + return total + + def compute(self, split): + count = getattr(self, "count") + return {loss: getattr(self, loss) / count for loss in self.losses} + + def _update_loss(self, loss: str, outputs, inputs): + # Update the loss + val = self._losses_func[loss](outputs, inputs) + getattr(self, loss).__iadd__(val.detach()) + # Return a weighted sum + weighted_loss = self._params[loss] * val + return weighted_loss + + def loss2logname(self, loss: str, split: str): + if loss == "total": + log_name = f"{loss}/{split}" + else: + loss_type, name = loss.split("_") + log_name = f"{loss_type}/{name}/{split}" + return log_name + + + +class MLDLosses_no_joint(Metric): + """ + MLD Loss + """ + + def __init__(self, vae, mode, cfg): + super().__init__(dist_sync_on_step=cfg.LOSS.DIST_SYNC_ON_STEP) + + # Save parameters + # self.vae = vae + self.vae_type = cfg.TRAIN.ABLATION.VAE_TYPE + self.mode = mode + self.cfg = cfg + self.predict_epsilon = cfg.TRAIN.ABLATION.PREDICT_EPSILON + self.stage = cfg.TRAIN.STAGE + + losses = [] + + # diffusion loss + if self.stage in ['diffusion', 'vae_diffusion']: + # instance noise loss + losses.append("inst_loss") + losses.append("x_loss") + if self.cfg.LOSS.LAMBDA_PRIOR != 0.0: + # prior noise loss + losses.append("prior_loss") + + if self.stage in ['vae', 'vae_diffusion']: + # 
reconstruction loss + losses.append("recons_feature") + losses.append("recons_verts") + # losses.append("recons_joints") + losses.append("recons_limb") + + losses.append("gen_feature") + # losses.append("gen_joints") + + # KL loss + losses.append("kl_motion") + + if self.stage not in ['vae', 'diffusion', 'vae_diffusion']: + raise ValueError(f"Stage {self.stage} not supported") + + losses.append("total") + + for loss in losses: + self.add_state(loss, + default=torch.tensor(0.0), + dist_reduce_fx="sum") + # self.register_buffer(loss, torch.tensor(0.0)) + self.add_state("count", torch.tensor(0), dist_reduce_fx="sum") + self.losses = losses + + self._losses_func = {} + self._params = {} + for loss in losses: + if loss.split('_')[0] == 'inst': + self._losses_func[loss] = nn.MSELoss(reduction='mean') + self._params[loss] = 1 + elif loss.split('_')[0] == 'x': + self._losses_func[loss] = nn.MSELoss(reduction='mean') + self._params[loss] = 1 + elif loss.split('_')[0] == 'prior': + self._losses_func[loss] = nn.MSELoss(reduction='mean') + self._params[loss] = cfg.LOSS.LAMBDA_PRIOR + if loss.split('_')[0] == 'kl': + if cfg.LOSS.LAMBDA_KL != 0.0: + self._losses_func[loss] = KLLoss() + self._params[loss] = cfg.LOSS.LAMBDA_KL + elif loss.split('_')[0] == 'recons': + self._losses_func[loss] = torch.nn.SmoothL1Loss( + reduction='mean') + self._params[loss] = cfg.LOSS.LAMBDA_REC + elif loss.split('_')[0] == 'gen': + self._losses_func[loss] = torch.nn.SmoothL1Loss( + reduction='mean') + self._params[loss] = cfg.LOSS.LAMBDA_GEN + elif loss.split('_')[0] == 'latent': + self._losses_func[loss] = torch.nn.SmoothL1Loss( + reduction='mean') + self._params[loss] = cfg.LOSS.LAMBDA_LATENT + else: + ValueError("This loss is not recognized.") + if loss.split('_')[-1] == 'joints': + self._params[loss] = cfg.LOSS.LAMBDA_JOINT + + def update(self, rs_set): + total: float = 0.0 + # Compute the losses + # Compute instance loss + if self.stage in ["vae", "vae_diffusion"]: + total += self._update_loss("recons_feature", rs_set['m_rst'], + rs_set['m_ref']) + # total += self._update_loss("recons_joints", rs_set['joints_rst'], + # rs_set['joints_ref']) + total += self._update_loss("kl_motion", rs_set['dist_m'], rs_set['dist_ref']) + + if self.stage in ["diffusion", "vae_diffusion"]: + # predict noise + if self.predict_epsilon: + total += self._update_loss("inst_loss", rs_set['noise_pred'], + rs_set['noise']) + # predict x + else: + total += self._update_loss("x_loss", rs_set['pred'], + rs_set['latent']) + + if self.cfg.LOSS.LAMBDA_PRIOR != 0.0: + # loss - prior loss + total += self._update_loss("prior_loss", rs_set['noise_prior'], + rs_set['dist_m1']) + + if self.stage in ["vae_diffusion"]: + # loss + # noise+text_emb => diff_reverse => latent => decode => motion + total += self._update_loss("gen_feature", rs_set['gen_m_rst'], + rs_set['m_ref']) + # total += self._update_loss("gen_joints", rs_set['gen_joints_rst'], + # rs_set['joints_ref']) + + self.total += total.detach() + self.count += 1 + + return total + + def compute(self, split): + count = getattr(self, "count") + return {loss: getattr(self, loss) / count for loss in self.losses} + + def _update_loss(self, loss: str, outputs, inputs): + # Update the loss + val = self._losses_func[loss](outputs, inputs) + getattr(self, loss).__iadd__(val.detach()) + # Return a weighted sum + weighted_loss = self._params[loss] * val + return weighted_loss + + def loss2logname(self, loss: str, split: str): + if loss == "total": + log_name = f"{loss}/{split}" + else: + loss_type, name = 
loss.split("_") + log_name = f"{loss_type}/{name}/{split}" + return log_name + +class KLLoss: + + def __init__(self): + pass + + def __call__(self, q, p): + div = torch.distributions.kl_divergence(q, p) + return div.mean() + + def __repr__(self): + return "KLLoss()" + + +class KLLossMulti: + + def __init__(self): + self.klloss = KLLoss() + + def __call__(self, qlist, plist): + return sum([self.klloss(q, p) for q, p in zip(qlist, plist)]) + + def __repr__(self): + return "KLLossMulti()" diff --git a/Evaluator_272/mld/models/losses/temos.py b/Evaluator_272/mld/models/losses/temos.py new file mode 100644 index 0000000000000000000000000000000000000000..cd02527a5451943f7d837ff26dad0b7af8575ac5 --- /dev/null +++ b/Evaluator_272/mld/models/losses/temos.py @@ -0,0 +1,220 @@ +import torch +import torch.nn as nn +from torchmetrics import Metric +from .infonce import InfoNCE + + +class TemosLosses(Metric): + """ + Loss + Modify loss + refer to temos loss + add loss like deep-motion-editing + 'gen_loss_total': l_total, + 'gen_loss_adv': l_adv, + 'gen_loss_recon_all': l_rec, + 'gen_loss_recon_r': l_r_rec, + 'gen_loss_recon_s': l_s_rec, + 'gen_loss_feature_all': l_ft, + 'gen_loss_feature_r': l_ft_r, + 'gen_loss_feature_s': l_ft_s, + 'gen_loss_feature_t': l_ft_t, + 'gen_loss_quaternion': l_qt, + 'gen_loss_twist': l_tw, + 'gen_loss_triplet': l_triplet, + 'gen_loss_joint': l_joint, + + """ + + def __init__(self, vae, mode, cfg): + super().__init__(dist_sync_on_step=cfg.LOSS.DIST_SYNC_ON_STEP) + # Save parameters + self.vae = vae + self.mode = mode + + self.use_infonce = cfg.LOSS.USE_INFONCE + + if self.use_infonce: + self.infonce_temp = cfg.LOSS.INFONCE_TEMP + + loss_on_both = True + force_loss_on_jfeats = True + ablation_no_kl_combine = False + ablation_no_kl_gaussian = False + ablation_no_motionencoder = False + + infonce_use_latent = False + + self.loss_on_both = loss_on_both + self.ablation_no_kl_combine = ablation_no_kl_combine + self.ablation_no_kl_gaussian = ablation_no_kl_gaussian + self.ablation_no_motionencoder = ablation_no_motionencoder + + self.infonce_use_latent = infonce_use_latent + + losses = [] + if mode == "xyz" or force_loss_on_jfeats: + if not ablation_no_motionencoder: + losses.append("recons_jfeats2jfeats") + losses.append("recons_text2jfeats") + if mode == "smpl": + if not ablation_no_motionencoder: + losses.append("recons_rfeats2rfeats") + losses.append("recons_text2rfeats") + else: + ValueError("This mode is not recognized.") + + if vae or loss_on_both: + kl_losses = [] + if not ablation_no_kl_combine and not ablation_no_motionencoder: + kl_losses.extend(["kl_text2motion", "kl_motion2text"]) + if not ablation_no_kl_gaussian: + if ablation_no_motionencoder: + kl_losses.extend(["kl_text"]) + else: + kl_losses.extend(["kl_text", "kl_motion"]) + losses.extend(kl_losses) + + if not self.vae or loss_on_both: + if not ablation_no_motionencoder: + losses.append("latent_manifold") + losses.append("total") + + if self.use_infonce: + losses.append("contrastive_infonce") + + + for loss in losses: + self.register_buffer(loss, torch.tensor(0.0)) + self.register_buffer("count", torch.tensor(0)) + # self.register_buffer(loss, default=torch.tensor(0.0), dist_reduce_fx="sum") + # self.register_buffer("count", default=torch.tensor(0), dist_reduce_fx="sum") + self.losses = losses + + # Instantiate loss functions + # self._losses_func = {loss: hydra.utils.instantiate(kwargs[loss + "_func"]) + # for loss in losses if loss != "total"} + self._losses_func = {} + self._params = {} + + for loss in losses: 
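+            # pick the loss function and its lambda weight from the loss-name prefix (kl / recons / latent / cycle / contrastive)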
+ if loss != 'total': + if loss.split('_')[0] == 'kl': + self._losses_func[loss] = KLLoss() + self._params[loss] = cfg.LOSS.LAMBDA_KL + elif loss.split('_')[0] == 'recons': + self._losses_func[loss] = torch.nn.SmoothL1Loss( + reduction='mean') + self._params[loss] = cfg.LOSS.LAMBDA_REC + elif loss.split('_')[0] == 'latent': + self._losses_func[loss] = torch.nn.SmoothL1Loss( + reduction='mean') + self._params[loss] = cfg.LOSS.LAMBDA_LATENT + elif loss.split('_')[0] == 'cycle': + self._losses_func[loss] = torch.nn.SmoothL1Loss( + reduction='mean') + self._params[loss] = cfg.LOSS.LAMBDA_CYCLE + elif loss.split('_')[0] == 'contrastive': + self._losses_func[loss] = InfoNCE(self.infonce_temp) + self._params[loss] = cfg.LOSS.LAMBDA_INFONCE + else: + ValueError("This loss is not recognized.") + + def update(self, + f_text=None, + f_motion=None, + f_ref=None, + lat_text=None, + lat_motion=None, + dis_text=None, + dis_motion=None, + dis_ref=None, + emb_dist=None): + total: float = 0.0 + + if self.mode == "xyz" or self.force_loss_on_jfeats: + if not self.ablation_no_motionencoder: + total += self._update_loss("recons_jfeats2jfeats", f_motion, + f_ref) + total += self._update_loss("recons_text2jfeats", f_text, f_ref) + + if self.mode == "smpl": + if not self.ablation_no_motionencoder: + total += self._update_loss("recons_rfeats2rfeats", + f_motion.rfeats, f_ref.rfeats) + total += self._update_loss("recons_text2rfeats", f_text.rfeats, + f_ref.rfeats) + + if self.vae or self.loss_on_both: + if not self.ablation_no_kl_combine and not self.ablation_no_motionencoder: + total += self._update_loss("kl_text2motion", dis_text, + dis_motion) + total += self._update_loss("kl_motion2text", dis_motion, + dis_text) + if not self.ablation_no_kl_gaussian: + total += self._update_loss("kl_text", dis_text, dis_ref) + if not self.ablation_no_motionencoder: + total += self._update_loss("kl_motion", dis_motion, + dis_ref) + if not self.vae or self.loss_on_both: + if not self.ablation_no_motionencoder: + total += self._update_loss("latent_manifold", lat_text, + lat_motion) + + if self.use_infonce: + if self.infonce_use_latent: + # print('use latent feature to calculate caontrastive loss') + total += self._update_loss("contrastive_infonce", (lat_text, lat_motion), emb_dist) + else: + total += self._update_loss("contrastive_infonce", (dis_motion.loc, dis_text.loc), emb_dist) + + + self.total += total.detach() + self.count += 1 + + return total + + def compute(self, split): + count = getattr(self, "count") + return {loss: getattr(self, loss) / count for loss in self.losses} + + def _update_loss(self, loss: str, outputs, inputs): + # Update the loss + val = self._losses_func[loss](outputs, inputs) + getattr(self, loss).__iadd__(val.detach()) + # Return a weighted sum + weighted_loss = self._params[loss] * val + return weighted_loss + + def loss2logname(self, loss: str, split: str): + if loss == "total": + log_name = f"{loss}/{split}" + else: + loss_type, name = loss.split("_") + log_name = f"{loss_type}/{name}/{split}" + return log_name + + +class KLLoss: + + def __init__(self): + pass + + def __call__(self, q, p): + div = torch.distributions.kl_divergence(q, p) + return div.mean() + + def __repr__(self): + return "KLLoss()" + + +class KLLossMulti: + + def __init__(self): + self.klloss = KLLoss() + + def __call__(self, qlist, plist): + return sum([self.klloss(q, p) for q, p in zip(qlist, plist)]) + + def __repr__(self): + return "KLLossMulti()" diff --git a/Evaluator_272/mld/models/losses/tmost.py 
b/Evaluator_272/mld/models/losses/tmost.py new file mode 100644 index 0000000000000000000000000000000000000000..14301e0ea2072b7b88216cf2e8a94f9c71741166 --- /dev/null +++ b/Evaluator_272/mld/models/losses/tmost.py @@ -0,0 +1,178 @@ +import torch +import torch.nn as nn +from torchmetrics import Metric + +class TmostLosses(Metric): + """ + Loss + Modify loss + refer to temos loss + add loss like deep-motion-editing + 'gen_loss_total': l_total, + 'gen_loss_adv': l_adv, + 'gen_loss_recon_all': l_rec, + 'gen_loss_recon_r': l_r_rec, + 'gen_loss_recon_s': l_s_rec, + 'gen_loss_feature_all': l_ft, + 'gen_loss_feature_r': l_ft_r, + 'gen_loss_feature_s': l_ft_s, + 'gen_loss_feature_t': l_ft_t, + 'gen_loss_quaternion': l_qt, + 'gen_loss_twist': l_tw, + 'gen_loss_triplet': l_triplet, + 'gen_loss_joint': l_joint, + + """ + def __init__(self, vae, mode, cfg): + super().__init__(dist_sync_on_step=cfg.LOSS.DIST_SYNC_ON_STEP) + + # Save parameters + self.vae = vae + self.mode = mode + + + losses = [] + losses.append("recons_mm2m") + losses.append("recons_t2m") + + losses.append("cross_mt2m") + losses.append("cross_tm2m") + + # cycle consistency loss + losses.append("cycle_cmsm2mContent") + losses.append("cycle_cmsm2mStyle") + + # latent loss + losses.append("latent_ct2cm") + losses.append("latent_st2sm") + + # KL loss + losses.append("kl_motion") + losses.append("kl_text") + losses.append("kl_ct2cm") + losses.append("kl_cm2ct") + + losses.append("total") + + for loss in losses: + self.register_buffer(loss, torch.tensor(0.0)) + self.register_buffer("count", torch.tensor(0)) + self.losses = losses + + self.ablation_cycle = cfg.TRAIN.ABLATION.CYCLE + + self._losses_func = {} + self._params = {} + for loss in losses: + if loss !='total': + if loss.split('_')[0] == 'kl': + self._losses_func[loss] = KLLoss() + self._params[loss] = cfg.LOSS.LAMBDA_KL + elif loss.split('_')[0] == 'recons': + self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean') + self._params[loss] = cfg.LOSS.LAMBDA_REC + elif loss.split('_')[0] == 'cross': + self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean') + self._params[loss] = cfg.LOSS.LAMBDA_CROSS + elif loss.split('_')[0] =='latent': + self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean') + self._params[loss] = cfg.LOSS.LAMBDA_LATENT + elif loss.split('_')[0] =='cycle': + self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean') + self._params[loss] = cfg.LOSS.LAMBDA_CYCLE + else: + ValueError("This loss is not recognized.") + + + def update(self, rs_set, dist_ref): + total: float = 0.0 + + # Compute the losses + """ + loss list + - triplet loss + - anchor style1 + - pos style2 + - neg diff_style + anchor = s_xa + pos = s_xpos + neg = self.gen.enc_style(co_data[diff_style], diff_style[-2:]) + l_triplet = self.triplet_loss(anchor, pos, neg) + - + """ + + + total += self._update_loss("recons_mm2m", rs_set['rs_cm1sm1'], rs_set['m1']) + total += self._update_loss("recons_t2m", rs_set['rs_ct1st1'], rs_set['m1']) + + # loss - cross reconstruction loss + total += self._update_loss("cross_mt2m", rs_set['rs_cm1st1'], rs_set['m1']) + total += self._update_loss("cross_tm2m", rs_set['rs_ct1sm1'], rs_set['m1']) + + + if self.ablation_cycle: + total += self._update_loss("cycle_cmsm2mContent", rs_set['cyc_rs_cm1sm1'], rs_set['m1']) + total += self._update_loss("cycle_cmsm2mStyle", rs_set['cyc_rs_cm2sm2'], rs_set['m2']) + + + total += self._update_loss("latent_ct2cm", rs_set['lat_ct1'], rs_set['lat_cm1']) + total += self._update_loss("latent_st2sm", 
rs_set['lat_st1'], rs_set['lat_sm1']) + + + total += self._update_loss("kl_motion", rs_set['dist_cm1'], dist_ref) + # total += self._update_loss("kl_motion", rs_set['dist_sm1'], dist_ref) + + total += self._update_loss("kl_text", rs_set['dist_ct1'], dist_ref) + # total += self._update_loss("kl_text", rs_set['dist_st1'], dist_ref) + + total += self._update_loss("kl_ct2cm", rs_set['dist_ct1'], rs_set['dist_cm1']) + total += self._update_loss("kl_cm2ct", rs_set['dist_cm1'], rs_set['dist_ct1']) + + self.total += total.detach() + self.count += 1 + + return total + + def compute(self, split): + count = getattr(self, "count") + return {loss: getattr(self, loss)/count for loss in self.losses} + + def _update_loss(self, loss: str, outputs, inputs): + # Update the loss + val = self._losses_func[loss](outputs, inputs) + getattr(self, loss).__iadd__(val.detach()) + # Return a weighted sum + weighted_loss = self._params[loss] * val + return weighted_loss + + def loss2logname(self, loss: str, split: str): + if loss == "total": + log_name = f"{loss}/{split}" + else: + loss_type, name = loss.split("_") + log_name = f"{loss_type}/{name}/{split}" + return log_name + + +class KLLoss: + def __init__(self): + pass + + def __call__(self, q, p): + div = torch.distributions.kl_divergence(q, p) + return div.mean() + + def __repr__(self): + return "KLLoss()" + + +class KLLossMulti: + def __init__(self): + self.klloss = KLLoss() + + def __call__(self, qlist, plist): + return sum([self.klloss(q, p) + for q, p in zip(qlist, plist)]) + + def __repr__(self): + return "KLLossMulti()" diff --git a/Evaluator_272/mld/models/losses/utils.py b/Evaluator_272/mld/models/losses/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..961744c30af867a3fa19337293cce6ee2cd5c268 --- /dev/null +++ b/Evaluator_272/mld/models/losses/utils.py @@ -0,0 +1,185 @@ + +import torch + +# --- +def keypoint_loss(self, pred_keypoints_2d, gt_keypoints_2d, openpose_weight, gt_weight): + """ + Compute 2D reprojection loss on the keypoints. + The loss is weighted by the confidence. + The available keypoints are different for each dataset. 
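+ gt_keypoints_2d is assumed to store (x, y, confidence) per joint; the + first 25 joints (presumably the OpenPose body-25 layout) are weighted by + openpose_weight and the remainder by gt_weight.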
+ """ + conf = gt_keypoints_2d[:, :, -1].unsqueeze(-1).clone() + conf[:, :25] *= openpose_weight + conf[:, 25:] *= gt_weight + loss = (conf * self.criterion_keypoints(pred_keypoints_2d, gt_keypoints_2d[:, :, :-1])).mean() + return loss + +import torch.nn.functional as F + +# NOTE: JOINTSTYPE_ROOT, JOINTSTYPE_LIMB, THETA_MAP and hessian_penalty are +# assumed to be supplied by the importing module; they are not defined here. + +def compute_2d_loss(model, batch): + ''' + keypoints loss + ''' + gt = batch["kp_2d"] + out = batch["pred_2d"] + mask = batch["mask"] + + gtmasked = gt[mask] + outmasked = out[mask] + loss = F.mse_loss(gtmasked, outmasked, reduction='mean') + return loss + +def compute_limb_loss(model, batch): + # limb position loss + x = batch["x_xyz"] + output = batch["output_xyz"] + mask = batch["mask"] + + # remove glob translation + # [bs njoint nfeats lengths] => [bs lengths njoints nfeats] + rootindex = JOINTSTYPE_ROOT[model.jointstype] + gt = x - x[:,:,[rootindex],:] + out = output - output[:,:,[rootindex],:] + + limbndex = JOINTSTYPE_LIMB[model.jointstype] + gtmasked = gt[:,:,limbndex,:][mask] + outmasked = out[:,:,limbndex,:][mask] + + loss = F.mse_loss(gtmasked, outmasked, reduction='mean') + return loss + +def compute_glob_loss(model, batch): + # glob rotation for the first (root) joint + x = batch["x"] + output = batch["output"] + mask = batch["mask"] + + # [bs njoint nfeats lengths] => [bs lengths njoints nfeats] + rootindex = JOINTSTYPE_ROOT[model.jointstype] + gtmasked = x[:,:,[rootindex],:][mask] + outmasked = output[:,:,[rootindex],:][mask] + + loss = F.mse_loss(gtmasked, outmasked, reduction='mean') + return loss + +def compute_theta_loss(model, batch): + x = batch['theta'] + output = batch["output_theta"] + mask = batch["mask"] + + gtmasked = x[mask] + outmasked = output[mask] + + # translation loss + root_index = THETA_MAP['root'] + w_root = batch["w_root"][mask][:,None] + gtmasked[:,root_index] *= w_root + outmasked[:,root_index] *= w_root + + loss = F.mse_loss(gtmasked, outmasked, reduction='mean') + return loss + +def compute_rc_loss(model, batch): + x = batch["x"] + output = batch["output"] + mask = batch["mask"] + + gtmasked = x[mask] + outmasked = output[mask] + + loss = F.mse_loss(gtmasked, outmasked, reduction='mean') + return loss + +def compute_rcxyz_loss(model, batch): + x = batch["x_xyz"] + output = batch["output_xyz"] + mask = batch["mask"] + + # dummy + # ---ignore global output for no global dataset--- + root_index = THETA_MAP['root'] + w_root = batch["w_root"][mask][:,None,None] + trans = batch['theta'][:,:,None,root_index,...][mask] + output_trans = batch['output_theta'][:,:,None,root_index][mask] + + gtmasked = x[mask] + outmasked = output[mask] + + gtmasked -= trans*(1-w_root) + outmasked -= output_trans*(1-w_root) + # ------------------------------------------------- + loss = F.mse_loss(gtmasked, outmasked, reduction='mean') + return loss + +def compute_rcverts_loss(model, batch): + x = batch["x_vertices"] + output = batch["output_vertices"] + mask = batch["mask"] + + # dummy + # ---ignore global output for no global dataset--- + root_index = THETA_MAP['root'] + w_root = batch["w_root"][mask][:,None,None] + trans = batch['theta'][:,:,None,root_index,...][mask] + output_trans = batch['output_theta'][:,:,None,root_index][mask] + + gtmasked = x[mask] + outmasked = output[mask] + + gtmasked -= trans*(1-w_root) + outmasked -= output_trans*(1-w_root) + # ------------------------------------------------- + loss = F.mse_loss(gtmasked, outmasked, reduction='mean') + return loss + +def compute_vel_loss(model, batch): + x = batch["x"] + output = batch["output"] + gtvel = (x[:,1:,...] - x[:, :-1,...])
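+ # outputvel mirrors gtvel: first-order finite differences between + # consecutive frames (v_t = x_{t+1} - x_t), so the validity mask is + # shifted by one frame below.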
+ outputvel = (output[:,1:,...] - output[:,:-1,...]) + + mask = batch["mask"][:,1:] + + gtvelmasked = gtvel[mask] + outvelmasked = outputvel[mask] + + loss = F.mse_loss(gtvelmasked, outvelmasked, reduction='mean') + return loss + + +def compute_velxyz_loss(model, batch): + x = batch["x_xyz"] + output = batch["output_xyz"] + gtvel = (x[:,1:,...] - x[:,:-1,...]) + outputvel = (output[:,1:,...] - output[:,:-1,...]) + + mask = batch["mask"][:, 1:] + + gtvelmasked = gtvel[mask] + outvelmasked = outputvel[mask] + + loss = F.mse_loss(gtvelmasked, outvelmasked, reduction='mean') + return loss + + +def compute_hp_loss(model, batch): + loss = hessian_penalty(model.return_latent, batch, seed=torch.random.seed()) + return loss + + +def compute_kl_loss(model, batch): + mu, logvar = batch["mu"], batch["logvar"] + loss = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) + return loss + +_matching_ = {"rc": compute_rc_loss, "kl": compute_kl_loss, "hp": compute_hp_loss, + "rcxyz": compute_rcxyz_loss, + "vel": compute_vel_loss, "velxyz": compute_velxyz_loss, + "glob": compute_glob_loss, "limb": compute_limb_loss, "rcverts": compute_rcverts_loss, + "theta": compute_theta_loss, "2d": compute_2d_loss} + +def get_loss_function(ltype): + return _matching_[ltype] + + +def get_loss_names(): + return list(_matching_.keys()) +# --- diff --git a/Evaluator_272/mld/models/metrics/__init__.py b/Evaluator_272/mld/models/metrics/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ee054e1bf6bfb8d16eaec4249bf85d7b181b1f70 --- /dev/null +++ b/Evaluator_272/mld/models/metrics/__init__.py @@ -0,0 +1,12 @@ +from .compute import ComputeMetrics +from .mr import MRMetrics +from .tm2t import TM2TMetrics +from .tm2t_R256 import TM2TMetrics_R256 +from .tmr_tm2t import TMR_TM2TMetrics +from .mm import MMMetrics +# from .gru import HUMANACTMetrics +# from .stgcn import UESTCMetrics +from .uncond import UncondMetrics +from .compute_body_hand import ComputeMetrics_body_hand +# from .mr_body_hand import MRMetrics_body_hand +from .acc import ACCMetrics diff --git a/Evaluator_272/mld/models/metrics/acc.py b/Evaluator_272/mld/models/metrics/acc.py new file mode 100644 index 0000000000000000000000000000000000000000..dee70f79bbc2bee9e20474c60973c316f464e764 --- /dev/null +++ b/Evaluator_272/mld/models/metrics/acc.py @@ -0,0 +1,96 @@ +from typing import List +import random +import torch +from torch import Tensor +from torchmetrics import Metric +from torchmetrics.functional import pairwise_euclidean_distance +import os +from .utils import * + + + +class ACCMetrics(Metric): + + def __init__(self, + dist_sync_on_step=True, + **kwargs): + super().__init__(dist_sync_on_step=dist_sync_on_step) + + self.name = "acc" + + # add metrics + self.add_state("count", default=torch.tensor(0), dist_reduce_fx="sum") + self.add_state("count_seq", + default=torch.tensor(0), + dist_reduce_fx="sum") + self.metrics = [] + # Accuracy + self.add_state("accuracy", + default=torch.tensor(0.), + dist_reduce_fx="sum") + self.add_state("gt_accuracy", + default=torch.tensor(0.), + dist_reduce_fx="sum") + self.metrics.extend(["accuracy", "gt_accuracy"]) + + def compute(self, sanity_flag): + count = self.count.item() + count_seq = self.count_seq.item() + + # init metrics + metrics = {metric: getattr(self, metric) for metric in self.metrics} + + # if in sanity check stage then jump + if sanity_flag: + return metrics + + # NOTE: the confusion matrices and embedding caches used below are not + # registered as states in this class; they are assumed to be attached + # by the surrounding evaluation code. + # Accuracy + self.accuracy = torch.trace(self.confusion) / torch.sum(self.confusion) + self.gt_accuracy = torch.trace(self.gt_confusion) / torch.sum(
self.gt_confusion) + + # cat all embeddings + all_labels = torch.cat(self.label_embeddings, axis=0) + all_genmotions = torch.cat(self.recmotion_embeddings, axis=0) + all_gtmotions = torch.cat(self.gtmotion_embeddings, axis=0) + all_gtmotions2 = all_gtmotions.clone()[ + torch.randperm(all_gtmotions.shape[0]), :] + genstats = calculate_activation_statistics(all_genmotions) + gtstats = calculate_activation_statistics(all_gtmotions) + gtstats2 = calculate_activation_statistics(all_gtmotions2) + + all_labels = all_labels.cpu() + + # calculate diversity and multimodality + self.Diversity, self.Multimodality = calculate_diversity_multimodality( + all_genmotions, + all_labels, + self.num_labels, + diversity_times=self.diversity_times, + multimodality_times=self.multimodality_times) + + self.gt_Diversity, self.gt_Multimodality = calculate_diversity_multimodality( + all_gtmotions, all_labels, self.num_labels) + + metrics.update( + {metric: getattr(self, metric) + for metric in self.metrics}) + + # Compute Fid + metrics["FID"] = calculate_fid(gtstats, genstats) + metrics["gt_FID"] = calculate_fid(gtstats, gtstats2) + + return {**metrics} + + def update( + self, + pred_idx: List, + label: List, + lengths: List[int] + ): + self.count += sum(lengths) + self.count_seq += len(lengths) + + + + diff --git a/Evaluator_272/mld/models/metrics/compute.py b/Evaluator_272/mld/models/metrics/compute.py new file mode 100644 index 0000000000000000000000000000000000000000..6a093e3e60f7ad1c9387e9b0c257c4306e4b01c0 --- /dev/null +++ b/Evaluator_272/mld/models/metrics/compute.py @@ -0,0 +1,196 @@ +from typing import List + +import torch +from einops import rearrange +from torch import Tensor +from torchmetrics import Metric + +from mld.models.tools.tools import remove_padding +from mld.transforms.joints2jfeats import Rifke +from mld.utils.geometry import matrix_of_angles + +from .utils import l2_norm, variance + + +class ComputeMetrics(Metric): + + def __init__(self, + njoints, + jointstype: str = "mmm", + force_in_meter: bool = True, + dist_sync_on_step=True, + **kwargs): + super().__init__(dist_sync_on_step=dist_sync_on_step) + if jointstype not in ["mmm", "humanml3d", "motionx", "motionx_v26"]: + raise NotImplementedError("This jointstype is not implemented.") + + self.name = 'APE and AVE' + self.jointstype = jointstype + self.rifke = Rifke(jointstype=jointstype, normalization=False) + + self.force_in_meter = force_in_meter + self.add_state("count", default=torch.tensor(0), dist_reduce_fx="sum") + self.add_state("count_seq", + default=torch.tensor(0), + dist_reduce_fx="sum") + + # APE + self.add_state("APE_root", + default=torch.tensor(0.), + dist_reduce_fx="sum") + self.add_state("APE_traj", + default=torch.tensor(0.), + dist_reduce_fx="sum") + self.add_state("APE_pose", + default=torch.zeros(njoints - 1), + dist_reduce_fx="sum") + self.add_state("APE_joints", + default=torch.zeros(njoints), + dist_reduce_fx="sum") + self.APE_metrics = ["APE_root", "APE_traj", "APE_pose", "APE_joints"] + + # AVE + self.add_state("AVE_root", + default=torch.tensor(0.), + dist_reduce_fx="sum") + self.add_state("AVE_traj", + default=torch.tensor(0.), + dist_reduce_fx="sum") + self.add_state("AVE_pose", + default=torch.zeros(njoints - 1), + dist_reduce_fx="sum") + self.add_state("AVE_joints", + default=torch.zeros(njoints), + dist_reduce_fx="sum") + self.AVE_metrics = ["AVE_root", "AVE_traj", "AVE_pose", "AVE_joints"] + + # All metric + self.metrics = self.APE_metrics + self.AVE_metrics + + def compute(self, sanity_flag): + count = 
self.count + APE_metrics = { + metric: getattr(self, metric) / count + for metric in self.APE_metrics + } + + # Compute average of APEs + APE_metrics["APE_mean_pose"] = self.APE_pose.mean() / count + APE_metrics["APE_mean_joints"] = self.APE_joints.mean() / count + + # Remove arrays + APE_metrics.pop("APE_pose") + APE_metrics.pop("APE_joints") + + count_seq = self.count_seq + AVE_metrics = { + metric: getattr(self, metric) / count_seq + for metric in self.AVE_metrics + } + + # Compute average of AVEs + AVE_metrics["AVE_mean_pose"] = self.AVE_pose.mean() / count_seq + AVE_metrics["AVE_mean_joints"] = self.AVE_joints.mean() / count_seq + + # Remove arrays + AVE_metrics.pop("AVE_pose") + AVE_metrics.pop("AVE_joints") + + return {**APE_metrics, **AVE_metrics} + + def update(self, jts_text: Tensor, jts_ref: Tensor, lengths: List[int]): + self.count += sum(lengths) + self.count_seq += len(lengths) + + jts_text, poses_text, root_text, traj_text = self.transform( + jts_text, lengths) + jts_ref, poses_ref, root_ref, traj_ref = self.transform( + jts_ref, lengths) + + for i in range(len(lengths)): + self.APE_root += l2_norm(root_text[i], root_ref[i], dim=1).sum() + self.APE_pose += l2_norm(poses_text[i], poses_ref[i], dim=2).sum(0) + self.APE_traj += l2_norm(traj_text[i], traj_ref[i], dim=1).sum() + self.APE_joints += l2_norm(jts_text[i], jts_ref[i], dim=2).sum(0) + + root_sigma_text = variance(root_text[i], lengths[i], dim=0) + root_sigma_ref = variance(root_ref[i], lengths[i], dim=0) + self.AVE_root += l2_norm(root_sigma_text, root_sigma_ref, dim=0) + + traj_sigma_text = variance(traj_text[i], lengths[i], dim=0) + traj_sigma_ref = variance(traj_ref[i], lengths[i], dim=0) + self.AVE_traj += l2_norm(traj_sigma_text, traj_sigma_ref, dim=0) + + poses_sigma_text = variance(poses_text[i], lengths[i], dim=0) + poses_sigma_ref = variance(poses_ref[i], lengths[i], dim=0) + self.AVE_pose += l2_norm(poses_sigma_text, poses_sigma_ref, dim=1) + + jts_sigma_text = variance(jts_text[i], lengths[i], dim=0) + jts_sigma_ref = variance(jts_ref[i], lengths[i], dim=0) + self.AVE_joints += l2_norm(jts_sigma_text, jts_sigma_ref, dim=1) + + def transform(self, joints: Tensor, lengths): + features = self.rifke(joints) + + ret = self.rifke.extract(features) + root_y, poses_features, vel_angles, vel_trajectory_local = ret + # already have the good dimensionality + angles = torch.cumsum(vel_angles, dim=-1) + # First frame should be 0, but if infered it is better to ensure it + angles = angles - angles[..., [0]] + + cos, sin = torch.cos(angles), torch.sin(angles) + rotations = matrix_of_angles(cos, sin, inv=False) + + # Get back the local poses + poses_local = rearrange(poses_features, + "... (joints xyz) -> ... 
joints xyz", + xyz=3) + + # Rotate the poses + poses = torch.einsum("...lj,...jk->...lk", poses_local[..., [0, 2]], + rotations) + poses = torch.stack( + (poses[..., 0], poses_local[..., 1], poses[..., 1]), axis=-1) + + # Rotate the vel_trajectory + vel_trajectory = torch.einsum("...j,...jk->...k", vel_trajectory_local, + rotations) + # Integrate the trajectory + # Already have the good dimensionality + trajectory = torch.cumsum(vel_trajectory, dim=-2) + # First frame should be 0, but if infered it is better to ensure it + trajectory = trajectory - trajectory[..., [0], :] + + # get the root joint + root = torch.cat( + (trajectory[..., :, [0]], root_y[..., None], trajectory[..., :, + [1]]), + dim=-1) + + # Add the root joints (which is still zero) + poses = torch.cat((0 * poses[..., [0], :], poses), -2) + # put back the root joint y + poses[..., 0, 1] = root_y + + # Add the trajectory globally + poses[..., [0, 2]] += trajectory[..., None, :] + if self.force_in_meter: + # different jointstypes have different scale factors + if self.jointstype == 'mmm': + factor = 1000.0 + elif self.jointstype in ['humanml3d', 'motionx', 'motionx_v26']: + factor = 1000.0 * 0.75 / 480.0 + else: + raise NotImplementedError("This jointstype is not implemented.") + + # return results in meters + return (remove_padding(poses / factor, lengths), # torch.Size([32, 196, 52, 3]) + remove_padding(poses_local / factor, lengths), #torch.Size([32, 196, 51, 3]) + remove_padding(root / factor, lengths), + remove_padding(trajectory / factor, lengths)) + else: + return (remove_padding(poses, lengths), + remove_padding(poses_local, + lengths), remove_padding(root, lengths), + remove_padding(trajectory, lengths)) diff --git a/Evaluator_272/mld/models/metrics/compute_best.py b/Evaluator_272/mld/models/metrics/compute_best.py new file mode 100644 index 0000000000000000000000000000000000000000..fc3bf47a746e968f2df23a48b1db613815aa4127 --- /dev/null +++ b/Evaluator_272/mld/models/metrics/compute_best.py @@ -0,0 +1,60 @@ +from typing import List + +import torch +from einops import rearrange +from torch import Tensor +from torchmetrics import Metric +import numpy as np +from .compute import ComputeMetrics, l2_norm, variance + + +class ComputeMetricsBest(ComputeMetrics): + def update(self, jts_text_: List[Tensor], jts_ref_: List[Tensor], lengths: List[List[int]]): + self.count += sum(lengths[0]) + self.count_seq += len(lengths[0]) + + ntrials = len(jts_text_) + metrics = [] + for index in range(ntrials): + jts_text, poses_text, root_text, traj_text = self.transform(jts_text_[index], lengths[index]) + jts_ref, poses_ref, root_ref, traj_ref = self.transform(jts_ref_[index], lengths[index]) + + mets = [] + for i in range(len(lengths[index])): + APE_root = l2_norm(root_text[i], root_ref[i], dim=1).sum() + APE_pose = l2_norm(poses_text[i], poses_ref[i], dim=2).sum(0) + APE_traj = l2_norm(traj_text[i], traj_ref[i], dim=1).sum() + APE_joints = l2_norm(jts_text[i], jts_ref[i], dim=2).sum(0) + + root_sigma_text = variance(root_text[i], lengths[index][i], dim=0) + root_sigma_ref = variance(root_ref[i], lengths[index][i], dim=0) + AVE_root = l2_norm(root_sigma_text, root_sigma_ref, dim=0) + + traj_sigma_text = variance(traj_text[i], lengths[index][i], dim=0) + traj_sigma_ref = variance(traj_ref[i], lengths[index][i], dim=0) + AVE_traj = l2_norm(traj_sigma_text, traj_sigma_ref, dim=0) + + poses_sigma_text = variance(poses_text[i], lengths[index][i], dim=0) + poses_sigma_ref = variance(poses_ref[i], lengths[index][i], dim=0) + AVE_pose = 
l2_norm(poses_sigma_text, poses_sigma_ref, dim=1) + + jts_sigma_text = variance(jts_text[i], lengths[index][i], dim=0) + jts_sigma_ref = variance(jts_ref[i], lengths[index][i], dim=0) + AVE_joints = l2_norm(jts_sigma_text, jts_sigma_ref, dim=1) + + met = [APE_root, APE_pose, APE_traj, APE_joints, + AVE_root, AVE_pose, AVE_traj, AVE_joints] + mets.append(met) + metrics.append(mets) + + # Quick hacks + mmm = metrics[np.argmin([x[0][0] for x in metrics])] + APE_root, APE_pose, APE_traj, APE_joints, AVE_root, AVE_pose, AVE_traj, AVE_joints = mmm[0] + self.APE_root += APE_root + self.APE_pose += APE_pose + self.APE_traj += APE_traj + self.APE_joints += APE_joints + self.AVE_root += AVE_root + self.AVE_pose += AVE_pose + self.AVE_traj += AVE_traj + self.AVE_joints += AVE_joints diff --git a/Evaluator_272/mld/models/metrics/compute_body_hand.py b/Evaluator_272/mld/models/metrics/compute_body_hand.py new file mode 100644 index 0000000000000000000000000000000000000000..b5eadf90d0ecc9783e0481aa4ee9bb549b7144a2 --- /dev/null +++ b/Evaluator_272/mld/models/metrics/compute_body_hand.py @@ -0,0 +1,286 @@ +from typing import List + +import torch +from einops import rearrange +from torch import Tensor +from torchmetrics import Metric + +from mld.models.tools.tools import remove_padding +from mld.transforms.joints2jfeats import Rifke +from mld.utils.geometry import matrix_of_angles + +from .utils import l2_norm, variance + + +class ComputeMetrics_body_hand(Metric): + + def __init__(self, + njoints, + jointstype: str = "mmm", + force_in_meter: bool = False, + dist_sync_on_step=True, + **kwargs): + super().__init__(dist_sync_on_step=dist_sync_on_step) + + if jointstype not in ["mmm", "humanml3d", "motionx", 'motionx_v26']: + raise NotImplementedError("This jointstype is not implemented.") + self.name = 'APE and AVE' + self.jointstype = jointstype + self.rifke = Rifke(jointstype=jointstype, normalization=False) + + self.force_in_meter = force_in_meter + self.add_state("count", default=torch.tensor(0), dist_reduce_fx="sum") + self.add_state("count_seq", + default=torch.tensor(0), + dist_reduce_fx="sum") + # APE + self.add_state("APE_root", + default=torch.tensor(0.), + dist_reduce_fx="sum") + self.add_state("APE_traj", + default=torch.tensor(0.), + dist_reduce_fx="sum") + self.add_state("APE_pose", + default=torch.zeros(njoints - 1), + dist_reduce_fx="sum") + + self.add_state("APE_pose_body", + default=torch.zeros(22 - 1), + dist_reduce_fx="sum") + + self.add_state("APE_pose_hand", + default=torch.zeros(30), + dist_reduce_fx="sum") + + + self.add_state("APE_joints", + default=torch.zeros(njoints), + dist_reduce_fx="sum") + + + self.add_state("APE_joints_body", + default=torch.zeros(22), + dist_reduce_fx="sum") + + self.add_state("APE_joints_hand", + default=torch.zeros(30), + dist_reduce_fx="sum") + + self.APE_metrics = ["APE_root", "APE_traj", "APE_pose", "APE_pose_body", "APE_pose_hand", "APE_joints", "APE_joints_body", "APE_joints_hand"] + + # AVE + self.add_state("AVE_root", + default=torch.tensor(0.), + dist_reduce_fx="sum") + self.add_state("AVE_traj", + default=torch.tensor(0.), + dist_reduce_fx="sum") + self.add_state("AVE_pose", + default=torch.zeros(njoints - 1), + dist_reduce_fx="sum") + + self.add_state("AVE_pose_body", + default=torch.zeros(22 - 1), + dist_reduce_fx="sum") + + self.add_state("AVE_pose_hand", + default=torch.zeros(30), + dist_reduce_fx="sum") + + self.add_state("AVE_joints", + default=torch.zeros(njoints), + dist_reduce_fx="sum") + + self.add_state("AVE_joints_body", + 
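# Assuming the 52-joint body-plus-hands layout used in this repo: update() + # slices joints as [:22] body / [22:] hands, and root-relative poses as + # [:21] / [21:], hence zeros(22 - 1) and zeros(30) here. +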
default=torch.zeros(22), + dist_reduce_fx="sum") + + self.add_state("AVE_joints_hand", + default=torch.zeros(30), + dist_reduce_fx="sum") + + + self.AVE_metrics = ["AVE_root", "AVE_traj", "AVE_pose", "AVE_pose_body", "AVE_pose_hand", "AVE_joints", "AVE_joints_body", "AVE_joints_hand"] + + # All metric + self.metrics = self.APE_metrics + self.AVE_metrics + + def compute(self, sanity_flag): + count = self.count + APE_metrics = { + metric: getattr(self, metric) / count + for metric in self.APE_metrics + } + + # Compute average of APEs + APE_metrics["APE_mean_pose"] = self.APE_pose.mean() / count + APE_metrics["APE_mean_pose_body"] = self.APE_pose_body.mean() / count + APE_metrics["APE_mean_pose_hand"] = self.APE_pose_hand.mean() / count + APE_metrics["APE_mean_joints"] = self.APE_joints.mean() / count + APE_metrics["APE_mean_joints_body"] = self.APE_joints_body.mean() / count + APE_metrics["APE_mean_joints_hand"] = self.APE_joints_hand.mean() / count + + # Remove arrays + APE_metrics.pop("APE_pose") + APE_metrics.pop("APE_pose_body") + APE_metrics.pop("APE_pose_hand") + APE_metrics.pop("APE_joints") + APE_metrics.pop("APE_joints_body") + APE_metrics.pop("APE_joints_hand") + + count_seq = self.count_seq + AVE_metrics = { + metric: getattr(self, metric) / count_seq + for metric in self.AVE_metrics + } + + # Compute average of AVEs + AVE_metrics["AVE_mean_pose"] = self.AVE_pose.mean() / count_seq + AVE_metrics["AVE_mean_pose_body"] = self.AVE_pose_body.mean() / count_seq + AVE_metrics["AVE_mean_pose_hand"] = self.AVE_pose_hand.mean() / count_seq + AVE_metrics["AVE_mean_joints"] = self.AVE_joints.mean() / count_seq + AVE_metrics["AVE_mean_joints_body"] = self.AVE_joints_body.mean() / count_seq + AVE_metrics["AVE_mean_joints_hand"] = self.AVE_joints_hand.mean() / count_seq + + # Remove arrays + AVE_metrics.pop("AVE_pose") + AVE_metrics.pop("AVE_pose_body") + AVE_metrics.pop("AVE_pose_hand") + AVE_metrics.pop("AVE_joints") + AVE_metrics.pop("AVE_joints_body") + AVE_metrics.pop("AVE_joints_hand") + + + return {**APE_metrics, **AVE_metrics} + + def update(self, jts_text: Tensor, jts_ref: Tensor, lengths: List[int]): + self.count += sum(lengths) + self.count_seq += len(lengths) + + jts_text, poses_text, root_text, traj_text = self.transform( + jts_text, lengths) + jts_ref, poses_ref, root_ref, traj_ref = self.transform( + jts_ref, lengths) + + + for i in range(len(lengths)): + jts_text_body = jts_text[i][..., :22, :] + jts_text_hand = jts_text[i][..., 22:, :] + jts_ref_body = jts_ref[i][..., :22, :] + jts_ref_hand = jts_ref[i][..., 22:, :] + + + poses_text_body = poses_text[i][..., :21, :] + poses_text_hand = poses_text[i][..., 21:, :] + poses_ref_body = poses_ref[i][..., :21, :] + poses_ref_hand = poses_ref[i][..., 21:, :] + + self.APE_root += l2_norm(root_text[i], root_ref[i], dim=1).sum() + self.APE_pose += l2_norm(poses_text[i], poses_ref[i], dim=2).sum(0) + self.APE_pose_body += l2_norm(poses_text_body, poses_ref_body, dim=2).sum(0) + self.APE_pose_hand += l2_norm(poses_text_hand, poses_ref_hand, dim=2).sum(0) + + self.APE_traj += l2_norm(traj_text[i], traj_ref[i], dim=1).sum() + self.APE_joints += l2_norm(jts_text[i], jts_ref[i], dim=2).sum(0) + self.APE_joints_body += l2_norm(jts_text_body, jts_ref_body, dim=2).sum(0) + self.APE_joints_hand += l2_norm(jts_text_hand, jts_ref_hand, dim=2).sum(0) + + root_sigma_text = variance(root_text[i], lengths[i], dim=0) + root_sigma_ref = variance(root_ref[i], lengths[i], dim=0) + self.AVE_root += l2_norm(root_sigma_text, root_sigma_ref, dim=0) + + 
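# AVE: for each sequence, variance() reduces over the time dimension and + # the metric accumulates the L2 distance between the ground-truth and + # predicted variance vectors. +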
traj_sigma_text = variance(traj_text[i], lengths[i], dim=0) + traj_sigma_ref = variance(traj_ref[i], lengths[i], dim=0) + self.AVE_traj += l2_norm(traj_sigma_text, traj_sigma_ref, dim=0) + + poses_sigma_text = variance(poses_text[i], lengths[i], dim=0) + poses_sigma_ref = variance(poses_ref[i], lengths[i], dim=0) + self.AVE_pose += l2_norm(poses_sigma_text, poses_sigma_ref, dim=1) + + poses_body_sigma_text = variance(poses_text_body, lengths[i], dim=0) + poses_body_sigma_ref = variance(poses_ref_body, lengths[i], dim=0) + self.AVE_pose_body += l2_norm(poses_body_sigma_text, poses_body_sigma_ref, dim=1) + + + poses_hand_sigma_text = variance(poses_text_hand, lengths[i], dim=0) + poses_hand_sigma_ref = variance(poses_ref_hand, lengths[i], dim=0) + self.AVE_pose_hand += l2_norm(poses_hand_sigma_text, poses_hand_sigma_ref, dim=1) + + + jts_sigma_text = variance(jts_text[i], lengths[i], dim=0) + jts_sigma_ref = variance(jts_ref[i], lengths[i], dim=0) + self.AVE_joints += l2_norm(jts_sigma_text, jts_sigma_ref, dim=1) + + jts_body_sigma_text = variance(jts_text_body, lengths[i], dim=0) + jts_body_sigma_ref = variance(jts_ref_body, lengths[i], dim=0) + self.AVE_joints_body += l2_norm(jts_body_sigma_text, jts_body_sigma_ref, dim=1) + + jts_hand_sigma_text = variance(jts_text_hand, lengths[i], dim=0) + jts_hand_sigma_ref = variance(jts_ref_hand, lengths[i], dim=0) + self.AVE_joints_hand += l2_norm(jts_hand_sigma_text, jts_hand_sigma_ref, dim=1) + + + + def transform(self, joints: Tensor, lengths): + features = self.rifke(joints) + + ret = self.rifke.extract(features) + root_y, poses_features, vel_angles, vel_trajectory_local = ret + # already have the good dimensionality + angles = torch.cumsum(vel_angles, dim=-1) + # First frame should be 0, but if infered it is better to ensure it + angles = angles - angles[..., [0]] + + cos, sin = torch.cos(angles), torch.sin(angles) + rotations = matrix_of_angles(cos, sin, inv=False) + + # Get back the local poses + poses_local = rearrange(poses_features, + "... (joints xyz) -> ... 
joints xyz", + xyz=3) + + # Rotate the poses + poses = torch.einsum("...lj,...jk->...lk", poses_local[..., [0, 2]], + rotations) + poses = torch.stack( + (poses[..., 0], poses_local[..., 1], poses[..., 1]), axis=-1) + + # Rotate the vel_trajectory + vel_trajectory = torch.einsum("...j,...jk->...k", vel_trajectory_local, + rotations) + # Integrate the trajectory + # Already have the good dimensionality + trajectory = torch.cumsum(vel_trajectory, dim=-2) + # First frame should be 0, but if infered it is better to ensure it + trajectory = trajectory - trajectory[..., [0], :] + + # get the root joint + root = torch.cat( + (trajectory[..., :, [0]], root_y[..., None], trajectory[..., :, + [1]]), + dim=-1) + + # Add the root joints (which is still zero) + poses = torch.cat((0 * poses[..., [0], :], poses), -2) + # put back the root joint y + poses[..., 0, 1] = root_y + + # Add the trajectory globally + poses[..., [0, 2]] += trajectory[..., None, :] + if self.force_in_meter: + # different jointstypes have different scale factors + if self.jointstype == 'mmm': + factor = 1000.0 + elif self.jointstype in ['humanml3d', 'motionx']: + factor = 1000.0 * 0.75 / 480.0 + + # return results in meters + return (remove_padding(poses / factor, lengths), # torch.Size([32, 196, 52, 3]) + remove_padding(poses_local / factor, lengths), #torch.Size([32, 196, 51, 3]) + remove_padding(root / factor, lengths), + remove_padding(trajectory / factor, lengths)) + else: + return (remove_padding(poses, lengths), + remove_padding(poses_local, + lengths), remove_padding(root, lengths), + remove_padding(trajectory, lengths)) diff --git a/Evaluator_272/mld/models/metrics/compute_worst.py b/Evaluator_272/mld/models/metrics/compute_worst.py new file mode 100644 index 0000000000000000000000000000000000000000..95b489ad75b536ba1b1e9f0be4063afa4d010086 --- /dev/null +++ b/Evaluator_272/mld/models/metrics/compute_worst.py @@ -0,0 +1,60 @@ +from typing import List + +import torch +from einops import rearrange +from torch import Tensor +from torchmetrics import Metric +import numpy as np +from .compute import ComputeMetrics, l2_norm, variance + + +class ComputeMetricsWorst(ComputeMetrics): + def update(self, jts_text_: List[Tensor], jts_ref_: List[Tensor], lengths: List[List[int]]): + self.count += sum(lengths[0]) + self.count_seq += len(lengths[0]) + + ntrials = len(jts_text_) + metrics = [] + for index in range(ntrials): + jts_text, poses_text, root_text, traj_text = self.transform(jts_text_[index], lengths[index]) + jts_ref, poses_ref, root_ref, traj_ref = self.transform(jts_ref_[index], lengths[index]) + + mets = [] + for i in range(len(lengths[index])): + APE_root = l2_norm(root_text[i], root_ref[i], dim=1).sum() + APE_pose = l2_norm(poses_text[i], poses_ref[i], dim=2).sum(0) + APE_traj = l2_norm(traj_text[i], traj_ref[i], dim=1).sum() + APE_joints = l2_norm(jts_text[i], jts_ref[i], dim=2).sum(0) + + root_sigma_text = variance(root_text[i], lengths[index][i], dim=0) + root_sigma_ref = variance(root_ref[i], lengths[index][i], dim=0) + AVE_root = l2_norm(root_sigma_text, root_sigma_ref, dim=0) + + traj_sigma_text = variance(traj_text[i], lengths[index][i], dim=0) + traj_sigma_ref = variance(traj_ref[i], lengths[index][i], dim=0) + AVE_traj = l2_norm(traj_sigma_text, traj_sigma_ref, dim=0) + + poses_sigma_text = variance(poses_text[i], lengths[index][i], dim=0) + poses_sigma_ref = variance(poses_ref[i], lengths[index][i], dim=0) + AVE_pose = l2_norm(poses_sigma_text, poses_sigma_ref, dim=1) + + jts_sigma_text = variance(jts_text[i], 
lengths[index][i], dim=0) + jts_sigma_ref = variance(jts_ref[i], lengths[index][i], dim=0) + AVE_joints = l2_norm(jts_sigma_text, jts_sigma_ref, dim=1) + + met = [APE_root, APE_pose, APE_traj, APE_joints, + AVE_root, AVE_pose, AVE_traj, AVE_joints] + mets.append(met) + metrics.append(mets) + + # Quick hacks + mmm = metrics[np.argmax([x[0][0] for x in metrics])] + APE_root, APE_pose, APE_traj, APE_joints, AVE_root, AVE_pose, AVE_traj, AVE_joints = mmm[0] + self.APE_root += APE_root + self.APE_pose += APE_pose + self.APE_traj += APE_traj + self.APE_joints += APE_joints + self.AVE_root += AVE_root + self.AVE_pose += AVE_pose + self.AVE_traj += AVE_traj + self.AVE_joints += AVE_joints diff --git a/Evaluator_272/mld/models/metrics/mm.py b/Evaluator_272/mld/models/metrics/mm.py new file mode 100644 index 0000000000000000000000000000000000000000..5bc23a4800972121ba1934a9c7c8215f2c5b1450 --- /dev/null +++ b/Evaluator_272/mld/models/metrics/mm.py @@ -0,0 +1,62 @@ +from typing import List + +import torch +from torch import Tensor +from torchmetrics import Metric +from torchmetrics.functional import pairwise_euclidean_distance + +from .utils import * + + +class MMMetrics(Metric): + full_state_update = True + + def __init__(self, mm_num_times=10, dist_sync_on_step=True, **kwargs): + super().__init__(dist_sync_on_step=dist_sync_on_step) + + self.name = "MultiModality scores" + + self.mm_num_times = mm_num_times + + self.add_state("count", default=torch.tensor(0), dist_reduce_fx="sum") + self.add_state("count_seq", + default=torch.tensor(0), + dist_reduce_fx="sum") + + self.metrics = ["MultiModality"] + self.add_state("MultiModality", + default=torch.tensor(0.), + dist_reduce_fx="sum") + + # chached batches + self.add_state("mm_motion_embeddings", default=[], dist_reduce_fx=None) + + def compute(self, sanity_flag): + count = self.count.item() + count_seq = self.count_seq.item() + + # init metrics + metrics = {metric: getattr(self, metric) for metric in self.metrics} + + # if in sanity check stage then jump + if sanity_flag: + return metrics + + # cat all embeddings + all_mm_motions = torch.cat(self.mm_motion_embeddings, + axis=0).cpu().numpy() + metrics['MultiModality'] = calculate_multimodality_np( + all_mm_motions, self.mm_num_times) + + return {**metrics} + + def update( + self, + mm_motion_embeddings: Tensor, + lengths: List[int], + ): + self.count += sum(lengths) + self.count_seq += len(lengths) + + # store all mm motion embeddings + self.mm_motion_embeddings.append(mm_motion_embeddings) diff --git a/Evaluator_272/mld/models/metrics/mr.py b/Evaluator_272/mld/models/metrics/mr.py new file mode 100644 index 0000000000000000000000000000000000000000..d50d105ea1fa5e9fd59e5fc87a0196a8a4af4ad8 --- /dev/null +++ b/Evaluator_272/mld/models/metrics/mr.py @@ -0,0 +1,106 @@ +from typing import List + +import torch +from torch import Tensor +from torchmetrics import Metric + +from .utils import * + + +# motion reconstruction metric +class MRMetrics(Metric): + + def __init__(self, + njoints, + jointstype: str = "mmm", + force_in_meter: bool = True, + align_root: bool = True, + dist_sync_on_step=True, + **kwargs): + super().__init__(dist_sync_on_step=dist_sync_on_step) + + if jointstype not in ["mmm", "humanml3d", "motionx", "motionx_v26"]: + raise NotImplementedError("This jointstype is not implemented.") + + self.name = 'Motion Reconstructions' + self.jointstype = jointstype + self.align_root = align_root + self.force_in_meter = force_in_meter + + self.add_state("count", default=torch.tensor(0), 
dist_reduce_fx="sum") + self.add_state("count_seq", + default=torch.tensor(0), + dist_reduce_fx="sum") + + self.add_state("MPJPE", + default=torch.tensor([0.0]), + dist_reduce_fx="sum") + + + self.add_state("PAMPJPE", + default=torch.tensor([0.0]), + dist_reduce_fx="sum") + + + + self.add_state("ACCEL", + default=torch.tensor([0.0]), + dist_reduce_fx="sum") + + + # todo + # self.add_state("ROOT", default=torch.tensor([0.0]), dist_reduce_fx="sum") + + self.MR_metrics = ["MPJPE", "PAMPJPE", "ACCEL"] + + # All metric + self.metrics = self.MR_metrics + + def compute(self, sanity_flag): + if self.force_in_meter: + # different jointstypes have different scale factors + # if self.jointstype == 'mmm': + # factor = 1000.0 + # elif self.jointstype == 'humanml3d': + # factor = 1000.0 * 0.75 / 480 + factor = 1000.0 + else: + factor = 1.0 + + count = self.count + count_seq = self.count_seq + mr_metrics = {} + mr_metrics["MPJPE"] = self.MPJPE / count * factor + + mr_metrics["PAMPJPE"] = self.PAMPJPE / count * factor + + # accel error: joints_gt[:-2] - 2 * joints_gt[1:-1] + joints_gt[2:] + # n-2 for each sequences + mr_metrics["ACCEL"] = self.ACCEL / (count - 2 * count_seq) * factor + + return mr_metrics + + def update(self, joints_rst: Tensor, joints_ref: Tensor, + lengths: List[int]): + assert joints_rst.shape == joints_ref.shape + assert joints_rst.dim() == 4 + # (bs, seq, njoint=22, 3) + + self.count += sum(lengths) + self.count_seq += len(lengths) + + # avoid cuda error of DDP in pampjpe + rst = joints_rst.detach().cpu() + ref = joints_ref.detach().cpu() + + # align root joints index + if self.align_root and self.jointstype in ['mmm', 'humanml3d', 'motionx']: + align_inds = [0] + else: + align_inds = None + + for i in range(len(lengths)): + self.MPJPE += torch.sum( + calc_mpjpe(rst[i], ref[i], align_inds=align_inds)) + self.PAMPJPE += torch.sum(calc_pampjpe(rst[i], ref[i])) + self.ACCEL += torch.sum(calc_accel(rst[i], ref[i])) diff --git a/Evaluator_272/mld/models/metrics/retrieval_recall.py b/Evaluator_272/mld/models/metrics/retrieval_recall.py new file mode 100644 index 0000000000000000000000000000000000000000..5483db36d99155efcad2f9e14781340e85c4b1ef --- /dev/null +++ b/Evaluator_272/mld/models/metrics/retrieval_recall.py @@ -0,0 +1,170 @@ +from typing import List + +import torch +from torch import Tensor +from torchmetrics import Metric +from torchmetrics.functional import pairwise_euclidean_distance + +from .utils import * + + +class Retrieval_Recall_Metrics(Metric): + full_state_update = True + + def __init__(self, + # top_k=3, + # R_size=32, + # diversity_times=300, + mode = ['all'], + dist_sync_on_step=True, + **kwargs): + super().__init__(dist_sync_on_step=dist_sync_on_step) + + self.name = "Retrieval_recall" + + if 'small_batch' in mode: + self.R_size = R_size + + # self.top_k = top_k + self.top_k = ['1', '2', '3', '5', '10'] + + self.add_state("count", default=torch.tensor(0), dist_reduce_fx="sum") + self.add_state("count_seq", + default=torch.tensor(0), + dist_reduce_fx="sum") + + self.metrics = [] + # Matching scores + # self.add_state("Matching_score", + # default=torch.tensor(0.0), + # dist_reduce_fx="sum") + # self.add_state("gt_Matching_score", + # default=torch.tensor(0.0), + # dist_reduce_fx="sum") + # self.Matching_metrics = ["Matching_score", "gt_Matching_score"] + self.Matching_metrics = [] + for k in self.top_k: + self.add_state( + f"R_precision_top_{k}", + default=torch.tensor(0.0), + dist_reduce_fx="sum", + ) + self.Matching_metrics.append(f"R_precision_top_{k}") + for k in 
self.top_k: + self.add_state( + f"gt_R_precision_top_{k}", + default=torch.tensor(0.0), + dist_reduce_fx="sum", + ) + self.Matching_metrics.append(f"gt_R_precision_top_{k}") + + self.metrics.extend(self.Matching_metrics) + + # chached batches + self.add_state("text_embeddings", default=[], dist_reduce_fx=None) + self.add_state("recmotion_embeddings", default=[], dist_reduce_fx=None) + self.add_state("gtmotion_embeddings", default=[], dist_reduce_fx=None) + + def compute(self, sanity_flag): + count = self.count.item() + count_seq = self.count_seq.item() + + # init metrics + metrics = {metric: getattr(self, metric) for metric in self.metrics} + + # if in sanity check stage then jump + if sanity_flag: + return metrics + # cat all embeddings + shuffle_idx = torch.randperm(count_seq) + all_texts = torch.cat(self.text_embeddings, + axis=0).cpu()[shuffle_idx, :] + all_genmotions = torch.cat(self.recmotion_embeddings, + axis=0).cpu()[shuffle_idx, :] + all_gtmotions = torch.cat(self.gtmotion_embeddings, + axis=0).cpu()[shuffle_idx, :] + + # Compute r-precision + assert count_seq > self.R_size + # print("**********************************") + # print(count_seq) + top_k_mat = torch.zeros((self.top_k, )) + for i in range(count_seq // self.R_size): + # [bs=32, 1*256] + group_texts = all_texts[i * self.R_size:(i + 1) * self.R_size] + # [bs=32, 1*256] + group_motions = all_genmotions[i * self.R_size:(i + 1) * + self.R_size] + # dist_mat = pairwise_euclidean_distance(group_texts, group_motions) + # [bs=32, 32] + dist_mat = euclidean_distance_matrix(group_texts, + group_motions).nan_to_num() + # print(dist_mat[:5]) + self.Matching_score += dist_mat.trace() + argsmax = torch.argsort(dist_mat, dim=1) + top_k_mat += calculate_top_k(argsmax, top_k=self.top_k).sum(axis=0) + R_count = count_seq // self.R_size * self.R_size + metrics["Matching_score"] = self.Matching_score / R_count + for k in range(self.top_k): + metrics[f"R_precision_top_{str(k+1)}"] = top_k_mat[k] / R_count + + # Compute r-precision with gt + assert count_seq > self.R_size + top_k_mat = torch.zeros((self.top_k, )) + for i in range(count_seq // self.R_size): + # [bs=32, 1*256] + group_texts = all_texts[i * self.R_size:(i + 1) * self.R_size] + # [bs=32, 1*256] + group_motions = all_gtmotions[i * self.R_size:(i + 1) * + self.R_size] + # [bs=32, 32] + dist_mat = euclidean_distance_matrix(group_texts, + group_motions).nan_to_num() + # match score + self.gt_Matching_score += dist_mat.trace() + argsmax = torch.argsort(dist_mat, dim=1) + top_k_mat += calculate_top_k(argsmax, top_k=self.top_k).sum(axis=0) + metrics["gt_Matching_score"] = self.gt_Matching_score / R_count + for k in range(self.top_k): + metrics[f"gt_R_precision_top_{str(k+1)}"] = top_k_mat[k] / R_count + + # tensor -> numpy for FID + all_genmotions = all_genmotions.numpy() + all_gtmotions = all_gtmotions.numpy() + + # Compute fid + mu, cov = calculate_activation_statistics_np(all_genmotions) + # gt_mu, gt_cov = calculate_activation_statistics_np(all_gtmotions) + gt_mu, gt_cov = calculate_activation_statistics_np(all_gtmotions) + metrics["FID"] = calculate_frechet_distance_np(gt_mu, gt_cov, mu, cov) + + # Compute diversity + assert count_seq > self.diversity_times + metrics["Diversity"] = calculate_diversity_np(all_genmotions, + self.diversity_times) + metrics["gt_Diversity"] = calculate_diversity_np( + all_gtmotions, self.diversity_times) + + return {**metrics} + + def update( + self, + text_embeddings: Tensor, + recmotion_embeddings: Tensor, + gtmotion_embeddings: Tensor, + lengths: 
List[int], + ): + self.count += sum(lengths) + self.count_seq += len(lengths) + + # [bs, nlatent*ndim] <= [bs, nlatent, ndim] + text_embeddings = torch.flatten(text_embeddings, start_dim=1).detach() + recmotion_embeddings = torch.flatten(recmotion_embeddings, + start_dim=1).detach() + gtmotion_embeddings = torch.flatten(gtmotion_embeddings, + start_dim=1).detach() + + # store all texts and motions + self.text_embeddings.append(text_embeddings) + self.recmotion_embeddings.append(recmotion_embeddings) + self.gtmotion_embeddings.append(gtmotion_embeddings) diff --git a/Evaluator_272/mld/models/metrics/tm2t.py b/Evaluator_272/mld/models/metrics/tm2t.py new file mode 100644 index 0000000000000000000000000000000000000000..9a12ac4f0e3f2b446a95f85dcf66b9f437148065 --- /dev/null +++ b/Evaluator_272/mld/models/metrics/tm2t.py @@ -0,0 +1,180 @@ +from typing import List + +import torch +from torch import Tensor +from torchmetrics import Metric +from torchmetrics.functional import pairwise_euclidean_distance + +from .utils import * + + +class TM2TMetrics(Metric): + full_state_update = True + + def __init__(self, + top_k=3, + R_size=32, + diversity_times=300, + dist_sync_on_step=True, + **kwargs): + super().__init__(dist_sync_on_step=dist_sync_on_step) + + self.name = "matching, fid, and diversity scores" + + self.top_k = top_k + self.R_size = R_size + self.diversity_times = diversity_times + + self.add_state("count", default=torch.tensor(0), dist_reduce_fx="sum") + self.add_state("count_seq", + default=torch.tensor(0), + dist_reduce_fx="sum") + + self.metrics = [] + # Matching scores + self.add_state("Matching_score", + default=torch.tensor(0.0), + dist_reduce_fx="sum") + self.add_state("gt_Matching_score", + default=torch.tensor(0.0), + dist_reduce_fx="sum") + self.Matching_metrics = ["Matching_score", "gt_Matching_score"] + for k in range(1, top_k + 1): + self.add_state( + f"R_precision_top_{str(k)}", + default=torch.tensor(0.0), + dist_reduce_fx="sum", + ) + self.Matching_metrics.append(f"R_precision_top_{str(k)}") + for k in range(1, top_k + 1): + self.add_state( + f"gt_R_precision_top_{str(k)}", + default=torch.tensor(0.0), + dist_reduce_fx="sum", + ) + self.Matching_metrics.append(f"gt_R_precision_top_{str(k)}") + + self.metrics.extend(self.Matching_metrics) + + # Fid + self.add_state("FID", default=torch.tensor(0.0), dist_reduce_fx="sum") + self.metrics.append("FID") + + # Diversity + self.add_state("Diversity", + default=torch.tensor(0.0), + dist_reduce_fx="sum") + self.add_state("gt_Diversity", + default=torch.tensor(0.0), + dist_reduce_fx="sum") + self.metrics.extend(["Diversity", "gt_Diversity"]) + + # chached batches + self.add_state("text_embeddings", default=[], dist_reduce_fx=None) + self.add_state("recmotion_embeddings", default=[], dist_reduce_fx=None) + self.add_state("gtmotion_embeddings", default=[], dist_reduce_fx=None) + + def compute(self, sanity_flag): + count = self.count.item() + count_seq = self.count_seq.item() + + # init metrics + metrics = {metric: getattr(self, metric) for metric in self.metrics} + + # if in sanity check stage then jump + if sanity_flag: + return metrics + + # cat all embeddings + shuffle_idx = torch.randperm(count_seq) + all_texts = torch.cat(self.text_embeddings, + axis=0).cpu()[shuffle_idx, :] + all_genmotions = torch.cat(self.recmotion_embeddings, + axis=0).cpu()[shuffle_idx, :] + all_gtmotions = torch.cat(self.gtmotion_embeddings, + axis=0).cpu()[shuffle_idx, :] + + # Compute r-precision + assert count_seq > self.R_size + # 
print("**********************************") + # print(count_seq) + top_k_mat = torch.zeros((self.top_k, )) + for i in range(count_seq // self.R_size): + # [bs=32, 1*256] + group_texts = all_texts[i * self.R_size:(i + 1) * self.R_size] + # [bs=32, 1*256] + group_motions = all_genmotions[i * self.R_size:(i + 1) * + self.R_size] + # dist_mat = pairwise_euclidean_distance(group_texts, group_motions) + # [bs=32, 32] + dist_mat = euclidean_distance_matrix(group_texts, + group_motions).nan_to_num() + # print(dist_mat[:5]) + self.Matching_score += dist_mat.trace() + argsmax = torch.argsort(dist_mat, dim=1) + top_k_mat += calculate_top_k(argsmax, top_k=self.top_k).sum(axis=0) + R_count = count_seq // self.R_size * self.R_size + metrics["Matching_score"] = self.Matching_score / R_count + for k in range(self.top_k): + metrics[f"R_precision_top_{str(k+1)}"] = top_k_mat[k] / R_count + + # Compute r-precision with gt + assert count_seq > self.R_size + top_k_mat = torch.zeros((self.top_k, )) + for i in range(count_seq // self.R_size): + # [bs=32, 1*256] + group_texts = all_texts[i * self.R_size:(i + 1) * self.R_size] + # [bs=32, 1*256] + group_motions = all_gtmotions[i * self.R_size:(i + 1) * + self.R_size] + # [bs=32, 32] + dist_mat = euclidean_distance_matrix(group_texts, + group_motions).nan_to_num() + # match score + self.gt_Matching_score += dist_mat.trace() + argsmax = torch.argsort(dist_mat, dim=1) + top_k_mat += calculate_top_k(argsmax, top_k=self.top_k).sum(axis=0) + metrics["gt_Matching_score"] = self.gt_Matching_score / R_count + for k in range(self.top_k): + metrics[f"gt_R_precision_top_{str(k+1)}"] = top_k_mat[k] / R_count + + # tensor -> numpy for FID + all_genmotions = all_genmotions.numpy() + all_gtmotions = all_gtmotions.numpy() + + # Compute fid + mu, cov = calculate_activation_statistics_np(all_genmotions) + # gt_mu, gt_cov = calculate_activation_statistics_np(all_gtmotions) + gt_mu, gt_cov = calculate_activation_statistics_np(all_gtmotions) + metrics["FID"] = calculate_frechet_distance_np(gt_mu, gt_cov, mu, cov) + + # Compute diversity + assert count_seq > self.diversity_times + metrics["Diversity"] = calculate_diversity_np(all_genmotions, + self.diversity_times) + metrics["gt_Diversity"] = calculate_diversity_np( + all_gtmotions, self.diversity_times) + + return {**metrics} + + def update( + self, + text_embeddings: Tensor, + recmotion_embeddings: Tensor, + gtmotion_embeddings: Tensor, + lengths: List[int], + ): + self.count += sum(lengths) + self.count_seq += len(lengths) + + # [bs, nlatent*ndim] <= [bs, nlatent, ndim] + text_embeddings = torch.flatten(text_embeddings, start_dim=1).detach() + recmotion_embeddings = torch.flatten(recmotion_embeddings, + start_dim=1).detach() + gtmotion_embeddings = torch.flatten(gtmotion_embeddings, + start_dim=1).detach() + + # store all texts and motions + self.text_embeddings.append(text_embeddings) + self.recmotion_embeddings.append(recmotion_embeddings) + self.gtmotion_embeddings.append(gtmotion_embeddings) diff --git a/Evaluator_272/mld/models/metrics/tm2t_R256.py b/Evaluator_272/mld/models/metrics/tm2t_R256.py new file mode 100644 index 0000000000000000000000000000000000000000..6302cdbb1cd131d97ef11e7267cde18749996d15 --- /dev/null +++ b/Evaluator_272/mld/models/metrics/tm2t_R256.py @@ -0,0 +1,167 @@ +from typing import List + +import torch +from torch import Tensor +from torchmetrics import Metric +from torchmetrics.functional import pairwise_euclidean_distance + +from .utils import * + + +class TM2TMetrics_R256(Metric): + full_state_update = 
True + + def __init__(self, + top_k=10, + R_size=256, + diversity_times=300, + dist_sync_on_step=True, + **kwargs): + super().__init__(dist_sync_on_step=dist_sync_on_step) + + self.name = "matching, fid, and diversity scores" + + self.top_k = top_k + self.R_size = R_size + # self.diversity_times = diversity_times + + self.add_state("count", default=torch.tensor(0), dist_reduce_fx="sum") + self.add_state("count_seq", + default=torch.tensor(0), + dist_reduce_fx="sum") + + self.metrics = [] + # Matching scores + # self.add_state("Matching_score", + # default=torch.tensor(0.0), + # dist_reduce_fx="sum") + # self.add_state("gt_Matching_score", + # default=torch.tensor(0.0), + # dist_reduce_fx="sum") + self.Matching_metrics = [] + for k in range(1, top_k + 1): + self.add_state( + f"R_precision_top_{str(k)}_{str(self.R_size)}", + default=torch.tensor(0.0), + dist_reduce_fx="sum", + ) + self.Matching_metrics.append(f"R_precision_top_{str(k)}_{str(self.R_size)}") + for k in range(1, top_k + 1): + self.add_state( + f"gt_R_precision_top_{str(k)}_{str(self.R_size)}", + default=torch.tensor(0.0), + dist_reduce_fx="sum", + ) + self.Matching_metrics.append(f"gt_R_precision_top_{str(k)}_{str(self.R_size)}") + + self.metrics.extend(self.Matching_metrics) + + # chached batches + self.add_state("text_embeddings", default=[], dist_reduce_fx=None) + self.add_state("recmotion_embeddings", default=[], dist_reduce_fx=None) + self.add_state("gtmotion_embeddings", default=[], dist_reduce_fx=None) + + def compute(self, sanity_flag): + count = self.count.item() + count_seq = self.count_seq.item() + + # init metrics + metrics = {metric: getattr(self, metric) for metric in self.metrics} + + # if in sanity check stage then jump + if sanity_flag: + return metrics + + # cat all embeddings + shuffle_idx = torch.randperm(count_seq) + all_texts = torch.cat(self.text_embeddings, + axis=0).cpu()[shuffle_idx, :] + all_genmotions = torch.cat(self.recmotion_embeddings, + axis=0).cpu()[shuffle_idx, :] + all_gtmotions = torch.cat(self.gtmotion_embeddings, + axis=0).cpu()[shuffle_idx, :] + + # Compute r-precision + assert count_seq > self.R_size + # print("**********************************") + # print(count_seq) + top_k_mat = torch.zeros((self.top_k, )) + for i in range(count_seq // self.R_size): + # [bs=32, 1*256] + group_texts = all_texts[i * self.R_size:(i + 1) * self.R_size] + # [bs=32, 1*256] + group_motions = all_genmotions[i * self.R_size:(i + 1) * + self.R_size] + # dist_mat = pairwise_euclidean_distance(group_texts, group_motions) + # [bs=32, 32] + dist_mat = euclidean_distance_matrix(group_texts, + group_motions).nan_to_num() + # print(dist_mat[:5]) + # self.Matching_score += dist_mat.trace() + argsmax = torch.argsort(dist_mat, dim=1) + top_k_mat += calculate_top_k(argsmax, top_k=self.top_k).sum(axis=0) + R_count = count_seq // self.R_size * self.R_size + # metrics["Matching_score"] = self.Matching_score / R_count + for k in range(self.top_k): + metrics[f"R_precision_top_{str(k+1)}_{self.R_size}"] = top_k_mat[k] / R_count + + # Compute r-precision with gt + assert count_seq > self.R_size + top_k_mat = torch.zeros((self.top_k, )) + for i in range(count_seq // self.R_size): + # [bs=32, 1*256] + group_texts = all_texts[i * self.R_size:(i + 1) * self.R_size] + # [bs=32, 1*256] + group_motions = all_gtmotions[i * self.R_size:(i + 1) * + self.R_size] + # [bs=32, 32] + dist_mat = euclidean_distance_matrix(group_texts, + group_motions).nan_to_num() + # match score + # self.gt_Matching_score += dist_mat.trace() + argsmax = 
torch.argsort(dist_mat, dim=1) + top_k_mat += calculate_top_k(argsmax, top_k=self.top_k).sum(axis=0) + # metrics["gt_Matching_score"] = self.gt_Matching_score / R_count + for k in range(self.top_k): + metrics[f"gt_R_precision_top_{str(k+1)}_{self.R_size}"] = top_k_mat[k] / R_count + + # tensor -> numpy for FID + # all_genmotions = all_genmotions.numpy() + # all_gtmotions = all_gtmotions.numpy() + + # Compute fid + # mu, cov = calculate_activation_statistics_np(all_genmotions) + # # gt_mu, gt_cov = calculate_activation_statistics_np(all_gtmotions) + # gt_mu, gt_cov = calculate_activation_statistics_np(all_gtmotions) + # metrics["FID"] = calculate_frechet_distance_np(gt_mu, gt_cov, mu, cov) + + # Compute diversity + # assert count_seq > self.diversity_times + # metrics["Diversity"] = calculate_diversity_np(all_genmotions, + # self.diversity_times) + # metrics["gt_Diversity"] = calculate_diversity_np( + # all_gtmotions, self.diversity_times) + + return {**metrics} + + def update( + self, + text_embeddings: Tensor, + recmotion_embeddings: Tensor, + gtmotion_embeddings: Tensor, + lengths: List[int], + ): + self.count += sum(lengths) + self.count_seq += len(lengths) + + # [bs, nlatent*ndim] <= [bs, nlatent, ndim] + text_embeddings = torch.flatten(text_embeddings, start_dim=1).detach() + recmotion_embeddings = torch.flatten(recmotion_embeddings, + start_dim=1).detach() + gtmotion_embeddings = torch.flatten(gtmotion_embeddings, + start_dim=1).detach() + + # store all texts and motions + self.text_embeddings.append(text_embeddings) + self.recmotion_embeddings.append(recmotion_embeddings) + self.gtmotion_embeddings.append(gtmotion_embeddings) diff --git a/Evaluator_272/mld/models/metrics/tmr_tm2t.py b/Evaluator_272/mld/models/metrics/tmr_tm2t.py new file mode 100644 index 0000000000000000000000000000000000000000..72ca7c79fe80549c991ee57765ac4ca74e94e5ba --- /dev/null +++ b/Evaluator_272/mld/models/metrics/tmr_tm2t.py @@ -0,0 +1,188 @@ +from typing import List + +import torch +from torch import Tensor +from torchmetrics import Metric +from torchmetrics.functional import pairwise_euclidean_distance + +from .utils import * + + +class TMR_TM2TMetrics(Metric): + full_state_update = True + + def __init__(self, + top_k=3, + R_size=32, + diversity_times=300, + dist_sync_on_step=True, + **kwargs): + super().__init__(dist_sync_on_step=dist_sync_on_step) + + self.name = "matching" + + self.top_k = top_k + self.R_size = R_size + # self.diversity_times = diversity_times + + self.add_state("count", default=torch.tensor(0), dist_reduce_fx="sum") + self.add_state("count_seq", + default=torch.tensor(0), + dist_reduce_fx="sum") + + self.metrics = [] + # Matching scores + self.add_state("TMR_Matching_score", + default=torch.tensor(0.0), + dist_reduce_fx="sum") + self.add_state("TMR_gt_Matching_score", + default=torch.tensor(0.0), + dist_reduce_fx="sum") + self.Matching_metrics = ["TMR_Matching_score", "TMR_gt_Matching_score"] + for k in range(1, top_k + 1): + self.add_state( + f"TMR_R_precision_top_{str(k)}", + default=torch.tensor(0.0), + dist_reduce_fx="sum", + ) + self.Matching_metrics.append(f"TMR_R_precision_top_{str(k)}") + for k in range(1, top_k + 1): + self.add_state( + f"TMR_gt_R_precision_top_{str(k)}", + default=torch.tensor(0.0), + dist_reduce_fx="sum", + ) + self.Matching_metrics.append(f"TMR_gt_R_precision_top_{str(k)}") + + self.metrics.extend(self.Matching_metrics) + + # Fid + # self.add_state("FID", default=torch.tensor(0.0), dist_reduce_fx="sum") + # self.metrics.append("FID") + + # Diversity + # 
self.add_state("Diversity", + # default=torch.tensor(0.0), + # dist_reduce_fx="sum") + # self.add_state("gt_Diversity", + # default=torch.tensor(0.0), + # dist_reduce_fx="sum") + # self.metrics.extend(["Diversity", "gt_Diversity"]) + + # chached batches + self.add_state("text_embeddings", default=[], dist_reduce_fx=None) + self.add_state("recmotion_embeddings", default=[], dist_reduce_fx=None) + self.add_state("gtmotion_embeddings", default=[], dist_reduce_fx=None) + + def compute(self, sanity_flag): + count = self.count.item() + count_seq = self.count_seq.item() + + # init metrics + metrics = {metric: getattr(self, metric) for metric in self.metrics} + + # if in sanity check stage then jump + if sanity_flag: + return metrics + + # cat all embeddings + shuffle_idx = torch.randperm(count_seq) + all_texts = torch.cat(self.text_embeddings, + axis=0).cpu()[shuffle_idx, :] + all_genmotions = torch.cat(self.recmotion_embeddings, + axis=0).cpu()[shuffle_idx, :] + all_gtmotions = torch.cat(self.gtmotion_embeddings, + axis=0).cpu()[shuffle_idx, :] + + # Compute r-precision + assert count_seq > self.R_size + + top_k_mat = torch.zeros((self.top_k, )) + for i in range(count_seq // self.R_size): + # [bs=32, 1*256] + group_texts = all_texts[i * self.R_size:(i + 1) * self.R_size] + # [bs=32, 1*256] + group_motions = all_genmotions[i * self.R_size:(i + 1) * + self.R_size] + # dist_mat = pairwise_euclidean_distance(group_texts, group_motions) + # [bs=32, 32] + group_texts = torch.nn.functional.normalize(group_texts, dim=1) + group_motions = torch.nn.functional.normalize(group_motions, dim=1) + + dist_mat = euclidean_distance_matrix(group_texts, + group_motions).nan_to_num() + # print(dist_mat[:5]) + self.TMR_Matching_score += dist_mat.trace() + argsmax = torch.argsort(dist_mat, dim=1) + top_k_mat += calculate_top_k(argsmax, top_k=self.top_k).sum(axis=0) + R_count = count_seq // self.R_size * self.R_size + metrics["TMR_Matching_score"] = self.TMR_Matching_score / R_count + for k in range(self.top_k): + metrics[f"TMR_R_precision_top_{str(k+1)}"] = top_k_mat[k] / R_count + + # Compute r-precision with gt + assert count_seq > self.R_size + top_k_mat = torch.zeros((self.top_k, )) + for i in range(count_seq // self.R_size): + # [bs=32, 1*256] + group_texts = all_texts[i * self.R_size:(i + 1) * self.R_size] + # [bs=32, 1*256] + group_motions = all_gtmotions[i * self.R_size:(i + 1) * + self.R_size] + # [bs=32, 32] + + group_texts = torch.nn.functional.normalize(group_texts, dim=1) + group_motions = torch.nn.functional.normalize(group_motions, dim=1) + + dist_mat = euclidean_distance_matrix(group_texts, + group_motions).nan_to_num() + + # match score + self.TMR_gt_Matching_score += dist_mat.trace() + argsmax = torch.argsort(dist_mat, dim=1) + top_k_mat += calculate_top_k(argsmax, top_k=self.top_k).sum(axis=0) + metrics["TMR_gt_Matching_score"] = self.TMR_gt_Matching_score / R_count + for k in range(self.top_k): + metrics[f"TMR_gt_R_precision_top_{str(k+1)}"] = top_k_mat[k] / R_count + + # tensor -> numpy for FID + # all_genmotions = all_genmotions.numpy() + # all_gtmotions = all_gtmotions.numpy() + + # Compute fid + # mu, cov = calculate_activation_statistics_np(all_genmotions) + # gt_mu, gt_cov = calculate_activation_statistics_np(all_gtmotions) + # gt_mu, gt_cov = calculate_activation_statistics_np(all_gtmotions) + # metrics["FID"] = calculate_frechet_distance_np(gt_mu, gt_cov, mu, cov) + + # Compute diversity + # assert count_seq > self.diversity_times + # metrics["Diversity"] = 
calculate_diversity_np(all_genmotions, + # self.diversity_times) + # metrics["gt_Diversity"] = calculate_diversity_np( + # all_gtmotions, self.diversity_times) + + return {**metrics} + + def update( + self, + text_embeddings: Tensor, + recmotion_embeddings: Tensor, + gtmotion_embeddings: Tensor, + lengths: List[int], + ): + + self.count += sum(lengths) + self.count_seq += len(lengths) + + # [bs, nlatent*ndim] <= [bs, nlatent, ndim] + text_embeddings = torch.flatten(text_embeddings, start_dim=1).detach() + recmotion_embeddings = torch.flatten(recmotion_embeddings, + start_dim=1).detach() + gtmotion_embeddings = torch.flatten(gtmotion_embeddings, + start_dim=1).detach() + + # store all texts and motions + self.text_embeddings.append(text_embeddings) + self.recmotion_embeddings.append(recmotion_embeddings) + self.gtmotion_embeddings.append(gtmotion_embeddings) diff --git a/Evaluator_272/mld/models/metrics/uncond.py b/Evaluator_272/mld/models/metrics/uncond.py new file mode 100644 index 0000000000000000000000000000000000000000..ef1cb27c1944b6d57a832bca512b7888233537bd --- /dev/null +++ b/Evaluator_272/mld/models/metrics/uncond.py @@ -0,0 +1,120 @@ +from typing import List + +import torch +from torch import Tensor +from torchmetrics import Metric +from torchmetrics.functional import pairwise_euclidean_distance + +from .utils import * + + +class UncondMetrics(Metric): + full_state_update = True + + def __init__(self, + top_k=3, + R_size=32, + diversity_times=300, + dist_sync_on_step=True, + **kwargs): + super().__init__(dist_sync_on_step=dist_sync_on_step) + + self.name = "fid, kid, and diversity scores" + + self.top_k = top_k + self.R_size = R_size + self.diversity_times = 300 + + self.add_state("count", default=torch.tensor(0), dist_reduce_fx="sum") + self.add_state("count_seq", + default=torch.tensor(0), + dist_reduce_fx="sum") + + self.metrics = [] + + # KID + self.add_state("KID_mean", + default=torch.tensor(0.0), + dist_reduce_fx="mean") + self.add_state("KID_std", + default=torch.tensor(0.0), + dist_reduce_fx="mean") + self.metrics.extend(["KID_mean", "KID_std"]) + # Fid + self.add_state("FID", default=torch.tensor(0.0), dist_reduce_fx="mean") + self.metrics.append("FID") + + # Diversity + self.add_state("Diversity", + default=torch.tensor(0.0), + dist_reduce_fx="sum") + self.add_state("gt_Diversity", + default=torch.tensor(0.0), + dist_reduce_fx="sum") + self.metrics.extend(["Diversity", "gt_Diversity"]) + + # chached batches + self.add_state("recmotion_embeddings", default=[], dist_reduce_fx=None) + self.add_state("gtmotion_embeddings", default=[], dist_reduce_fx=None) + + def compute(self, sanity_flag): + count = self.count.item() + count_seq = self.count_seq.item() + + # init metrics + metrics = {metric: getattr(self, metric) for metric in self.metrics} + + # if in sanity check stage then jump + if sanity_flag: + return metrics + + # cat all embeddings + all_gtmotions = torch.cat(self.gtmotion_embeddings, axis=0).cpu() + all_genmotions = torch.cat(self.recmotion_embeddings, axis=0).cpu() + + # Compute kid + + KID_mean, KID_std = calculate_kid(all_gtmotions, all_genmotions) + metrics["KID_mean"] = KID_mean + metrics["KID_std"] = KID_std + + # tensor -> numpy for FID + all_genmotions = all_genmotions.numpy() + all_gtmotions = all_gtmotions.numpy() + + # Compute fid + mu, cov = calculate_activation_statistics_np(all_genmotions) + # gt_mu, gt_cov = calculate_activation_statistics_np(all_gtmotions) + gt_mu, gt_cov = calculate_activation_statistics_np(all_gtmotions) + metrics["FID"] = 
calculate_frechet_distance_np(gt_mu, gt_cov, mu, cov)
+
+        # Compute diversity
+        assert count_seq > self.diversity_times
+        metrics["Diversity"] = calculate_diversity_np(all_genmotions,
+                                                      self.diversity_times)
+        metrics["gt_Diversity"] = calculate_diversity_np(
+            all_gtmotions, self.diversity_times)
+
+        return {**metrics}
+
+    def update(
+        self,
+        gtmotion_embeddings: Tensor,
+        lengths: List[int],
+        recmotion_embeddings=None,
+    ):
+        self.count += sum(lengths)
+        self.count_seq += len(lengths)
+
+        # [bs, nlatent*ndim] <= [bs, nlatent, ndim]
+        if recmotion_embeddings is not None:
+            recmotion_embeddings = torch.flatten(recmotion_embeddings,
+                                                 start_dim=1).detach()
+            # store all motions
+            self.recmotion_embeddings.append(recmotion_embeddings)
+        gtmotion_embeddings = torch.flatten(gtmotion_embeddings,
+                                            start_dim=1).detach()
+
+        self.gtmotion_embeddings.append(gtmotion_embeddings)
diff --git a/Evaluator_272/mld/models/metrics/utils.py b/Evaluator_272/mld/models/metrics/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..955e5705732abbe7a1c1d050f72cab8efed26999
--- /dev/null
+++ b/Evaluator_272/mld/models/metrics/utils.py
@@ -0,0 +1,644 @@
+import numpy as np
+import scipy.linalg
+import torch
+from torch import linalg
+import sys
+
+
+def l2_norm(x1, x2, dim):
+    return torch.linalg.vector_norm(x1 - x2, ord=2, dim=dim)
+
+
+def variance(x, T, dim):
+    mean = x.mean(dim)
+    out = (x - mean)**2
+    out = out.sum(dim)
+    return out / (T - 1)
+
+
+def sqrtm(input):
+    # np.float64_ does not exist; cast to np.float64 before the matrix sqrt
+    m = input.detach().cpu().numpy().astype(np.float64)
+    sqrtm = torch.from_numpy(scipy.linalg.sqrtm(m)).to(input)
+    return sqrtm
+
+
+# (X - X_train)*(X - X_train) = -2X*X_train + X*X + X_train*X_train
+def euclidean_distance_matrix(matrix1, matrix2):
+    """
+    Params:
+    -- matrix1: N1 x D
+    -- matrix2: N2 x D
+    Returns:
+    -- dist: N1 x N2
+    dist[i, j] == distance(matrix1[i], matrix2[j])
+    """
+    assert matrix1.shape[1] == matrix2.shape[1]
+    d1 = -2 * torch.mm(matrix1, matrix2.T)  # shape (num_test, num_train)
+    d2 = torch.sum(torch.square(matrix1), axis=1,
+                   keepdims=True)  # shape (num_test, 1)
+    d3 = torch.sum(torch.square(matrix2), axis=1)  # shape (num_train, )
+    dists = torch.sqrt(d1 + d2 + d3)  # broadcasting
+    return dists
+
+
+def euclidean_distance_matrix_np(matrix1, matrix2):
+    """
+    Params:
+    -- matrix1: N1 x D
+    -- matrix2: N2 x D
+    Returns:
+    -- dist: N1 x N2
+    dist[i, j] == distance(matrix1[i], matrix2[j])
+    """
+    assert matrix1.shape[1] == matrix2.shape[1]
+    d1 = -2 * np.dot(matrix1, matrix2.T)  # shape (num_test, num_train)
+    d2 = np.sum(np.square(matrix1), axis=1,
+                keepdims=True)  # shape (num_test, 1)
+    d3 = np.sum(np.square(matrix2), axis=1)  # shape (num_train, )
+    dists = np.sqrt(d1 + d2 + d3)  # broadcasting
+    return dists
+
+
+def calculate_top_k(mat, top_k):
+    size = mat.shape[0]
+    gt_mat = (torch.unsqueeze(torch.arange(size),
+                              1).to(mat.device).repeat_interleave(size, 1))
+    bool_mat = mat == gt_mat
+    correct_vec = False
+    top_k_list = []
+    for i in range(top_k):
+        correct_vec = correct_vec | bool_mat[:, i]
+        top_k_list.append(correct_vec[:, None])
+    top_k_mat = torch.cat(top_k_list, dim=1)
+    return top_k_mat
+
+
+def calculate_activation_statistics(activations):
+    """
+    Params:
+    -- activation: num_samples x dim_feat
+    Returns:
+    -- mu: dim_feat
+    -- sigma: dim_feat x dim_feat
+    """
+    activations = activations.cpu().numpy()
+    mu = np.mean(activations, axis=0)
+    sigma =
np.cov(activations, rowvar=False) + return mu, sigma + + +def calculate_activation_statistics_np(activations): + """ + Params: + -- activation: num_samples x dim_feat + Returns: + -- mu: dim_feat + -- sigma: dim_feat x dim_feat + """ + mu = np.mean(activations, axis=0) + cov = np.cov(activations, rowvar=False) + return mu, cov + + +# def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6): +# """Numpy implementation of the Frechet Distance. +# The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1) +# and X_2 ~ N(mu_2, C_2) is +# d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)). +# Stable version by Dougal J. Sutherland. +# Params: +# -- mu1 : Numpy array containing the activations of a layer of the +# inception net (like returned by the function 'get_predictions') +# for generated samples. +# -- mu2 : The sample mean over activations, precalculated on an +# representative data set. +# -- sigma1: The covariance matrix over activations for generated samples. +# -- sigma2: The covariance matrix over activations, precalculated on an +# representative data set. +# Returns: +# -- : The Frechet Distance. +# """ + +# mu1 = torch.atleast_1d(mu1) +# mu2 = torch.atleast_1d(mu2) + +# sigma1 = torch.atleast_2d(sigma1) +# sigma2 = torch.atleast_2d(sigma2) + +# assert mu1.shape == mu2.shape, \ +# 'Training and test mean vectors have different lengths' +# assert sigma1.shape == sigma2.shape, \ +# 'Training and test covariances have different dimensions' + +# diff = mu1 - mu2 + +# # Product might be almost singular +# # covmean, _ = sqrtm(sigma1.dot(sigma2), disp=False) +# covmean = sqrtm(torch.mm(sigma1,sigma2)) +# if not torch.isfinite(covmean).all(): +# msg = ('fid calculation produces singular product; ' +# 'adding %s to diagonal of cov estimates') % eps +# print(msg) +# offset = torch.eye(sigma1.shape[0]) * eps +# # covmean = sqrtm((sigma1 + offset).dot(sigma2 + offset)) +# covmean = sqrtm(torch.mm(sigma1+ offset,sigma2+ offset)) + +# # Numerical error might give slight imaginary component +# if torch.is_complex(covmean): +# if not torch.allclose(torch.diagonal(covmean).imag, 0, atol=1e-3): +# m = torch.max(torch.abs(covmean.imag)) +# raise ValueError('Imaginary component {}'.format(m)) +# covmean = covmean.real + +# tr_covmean = torch.trace(covmean) + +# return (diff.dot(diff) + torch.trace(sigma1) + +# torch.trace(sigma2) - 2 * tr_covmean) + + +def calculate_frechet_distance_np(mu1, sigma1, mu2, sigma2, eps=1e-6): + """Numpy implementation of the Frechet Distance. + The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1) + and X_2 ~ N(mu_2, C_2) is + d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)). + Stable version by Dougal J. Sutherland. + Params: + -- mu1 : Numpy array containing the activations of a layer of the + inception net (like returned by the function 'get_predictions') + for generated samples. + -- mu2 : The sample mean over activations, precalculated on an + representative data set. + -- sigma1: The covariance matrix over activations for generated samples. + -- sigma2: The covariance matrix over activations, precalculated on an + representative data set. + Returns: + -- : The Frechet Distance. 
+ """ + + mu1 = np.atleast_1d(mu1) + mu2 = np.atleast_1d(mu2) + + sigma1 = np.atleast_2d(sigma1) + sigma2 = np.atleast_2d(sigma2) + + assert (mu1.shape == mu2.shape + ), "Training and test mean vectors have different lengths" + assert (sigma1.shape == sigma2.shape + ), "Training and test covariances have different dimensions" + + diff = mu1 - mu2 + # Product might be almost singular + covmean, _ = scipy.linalg.sqrtm(sigma1.dot(sigma2), disp=False) + if not np.isfinite(covmean).all(): + msg = ("fid calculation produces singular product; " + "adding %s to diagonal of cov estimates") % eps + print(msg) + offset = np.eye(sigma1.shape[0]) * eps + covmean = scipy.linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset)) + + # Numerical error might give slight imaginary component + if np.iscomplexobj(covmean): + if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3): + m = np.max(np.abs(covmean.imag)) + raise ValueError("Imaginary component {}".format(m)) + # print("Imaginary component {}".format(m)) + covmean = covmean.real + tr_covmean = np.trace(covmean) + + return diff.dot(diff) + np.trace(sigma1) + np.trace( + sigma2) - 2 * tr_covmean + + +def calculate_diversity(activation, diversity_times): + assert len(activation.shape) == 2 + assert activation.shape[0] > diversity_times + num_samples = activation.shape[0] + + first_indices = np.random.choice(num_samples, + diversity_times, + replace=False) + second_indices = np.random.choice(num_samples, + diversity_times, + replace=False) + dist = linalg.norm(activation[first_indices] - activation[second_indices], + axis=1) + return dist.mean() + + +def calculate_diversity_np(activation, diversity_times): + assert len(activation.shape) == 2 + assert activation.shape[0] > diversity_times + num_samples = activation.shape[0] + + first_indices = np.random.choice(num_samples, + diversity_times, + replace=False) + second_indices = np.random.choice(num_samples, + diversity_times, + replace=False) + dist = scipy.linalg.norm(activation[first_indices] - + activation[second_indices], + axis=1) + return dist.mean() + + +def calculate_multimodality_np(activation, multimodality_times): + assert len(activation.shape) == 3 + assert activation.shape[1] > multimodality_times + num_per_sent = activation.shape[1] + + first_dices = np.random.choice(num_per_sent, + multimodality_times, + replace=False) + second_dices = np.random.choice(num_per_sent, + multimodality_times, + replace=False) + dist = scipy.linalg.norm(activation[:, first_dices] - + activation[:, second_dices], + axis=2) + return dist.mean() + + +# motion reconstructions metrics + + +def batch_compute_similarity_transform_torch(S1, S2): + """ + Computes a similarity transform (sR, t) that takes + a set of 3D points S1 (3 x N) closest to a set of 3D points S2, + where R is an 3x3 rotation matrix, t 3x1 translation, s scale. + i.e. solves the orthogonal Procrutes problem. + """ + transposed = False + if S1.shape[0] != 3 and S1.shape[0] != 2: + S1 = S1.permute(0, 2, 1) + S2 = S2.permute(0, 2, 1) + transposed = True + assert S2.shape[1] == S1.shape[1] + + # 1. Remove mean. + mu1 = S1.mean(axis=-1, keepdims=True) + mu2 = S2.mean(axis=-1, keepdims=True) + + X1 = S1 - mu1 + X2 = S2 - mu2 + + # 2. Compute variance of X1 used for scale. + var1 = torch.sum(X1**2, dim=1).sum(dim=1) + + # 3. The outer product of X1 and X2. + K = X1.bmm(X2.permute(0, 2, 1)) + + # 4. Solution that Maximizes trace(R'K) is R=U*V', where U, V are + # singular vectors of K. 
+ U, s, V = torch.svd(K) + + # Construct Z that fixes the orientation of R to get det(R)=1. + Z = torch.eye(U.shape[1], device=S1.device).unsqueeze(0) + Z = Z.repeat(U.shape[0], 1, 1) + Z[:, -1, -1] *= torch.sign(torch.det(U.bmm(V.permute(0, 2, 1)))) + + # Construct R. + R = V.bmm(Z.bmm(U.permute(0, 2, 1))) + + # 5. Recover scale. + scale = torch.cat([torch.trace(x).unsqueeze(0) for x in R.bmm(K)]) / var1 + + # 6. Recover translation. + t = mu2 - (scale.unsqueeze(-1).unsqueeze(-1) * (R.bmm(mu1))) + + # 7. Error: + S1_hat = scale.unsqueeze(-1).unsqueeze(-1) * R.bmm(S1) + t + + if transposed: + S1_hat = S1_hat.permute(0, 2, 1) + + return S1_hat, (scale, R, t) + + +def compute_mpjpe(preds, + target, + valid_mask=None, + pck_joints=None, + sample_wise=True): + """ + Mean per-joint position error (i.e. mean Euclidean distance) + often referred to as "Protocol #1" in many papers. + """ + assert preds.shape == target.shape, print(preds.shape, + target.shape) # BxJx3 + mpjpe = torch.norm(preds - target, p=2, dim=-1) # BxJ + + if pck_joints is None: + if sample_wise: + mpjpe_seq = ((mpjpe * valid_mask.float()).sum(-1) / + valid_mask.float().sum(-1) + if valid_mask is not None else mpjpe.mean(-1)) + else: + mpjpe_seq = mpjpe[valid_mask] if valid_mask is not None else mpjpe + return mpjpe_seq + else: + mpjpe_pck_seq = mpjpe[:, pck_joints] + return mpjpe_pck_seq + + +def align_by_parts(joints, align_inds=None): + if align_inds is None: + return joints + pelvis = joints[:, align_inds].mean(1) + return joints - torch.unsqueeze(pelvis, dim=1) + +def align_by_parts_hand(joints, align_inds=None): + if align_inds is None: + return joints + + align_inds_left_wrist = align_inds[0] + align_inds_right_wrist = align_inds[1] + + left_wrist = joints[:, align_inds_left_wrist].mean(1) + right_wrist = joints[:, align_inds_right_wrist].mean(1) + result = torch.cat((joints[...,22:22+15,:] - torch.unsqueeze(left_wrist, dim=1), joints[...,22+15:,:] - torch.unsqueeze(right_wrist, dim=1)), dim=-2) + return result + +def calc_mpjpe(preds, target, align_inds=[0], sample_wise=True, trans=None): + # Expects BxJx3 + valid_mask = target[:, :, 0] != -2.0 + + + if align_inds is not None: + preds_aligned = align_by_parts(preds, align_inds=align_inds) + if trans is not None: + preds_aligned += trans + target_aligned = align_by_parts(target, align_inds=align_inds) + else: + preds_aligned, target_aligned = preds, target + mpjpe_each = compute_mpjpe(preds_aligned, + target_aligned, + valid_mask=valid_mask, + sample_wise=sample_wise) + return mpjpe_each + + +def calc_mpjpe_hand(preds, target, align_inds, sample_wise=True, trans=None): + assert len(align_inds) == 2 + # align_inds_left_wrist = align_inds[0] + # align_inds_right_wrist = align_inds[1] + # Expects BxJx3 + valid_mask = target[:, :, 0] != -2.0 + # valid_mask = torch.BoolTensor(target[:, :, 0].shape) + if align_inds is not None: + preds_aligned = align_by_parts_hand(preds, align_inds=align_inds) + if trans is not None: + preds_aligned += trans + target_aligned = align_by_parts_hand(target, align_inds=align_inds) + else: + preds_aligned, target_aligned = preds, target + + + mpjpe_each = compute_mpjpe(preds_aligned[...,-30:,:], + target_aligned[...,-30:,:], + valid_mask=valid_mask[..., -30:], + sample_wise=sample_wise) + return mpjpe_each + + + + +def calc_accel(preds, target): + """ + Mean joint acceleration error + often referred to as "Protocol #1" in many papers. 
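+    Acceleration is approximated by the second finite difference over time,
+    a[t] = x[t] - 2*x[t+1] + x[t+2], and the error is the mean Euclidean
+    distance between predicted and ground-truth accelerations, averaged over
+    joints per frame. (The "Protocol #1" phrasing above is inherited from the
+    MPJPE docstring; this function measures acceleration error.)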
+ """ + assert preds.shape == target.shape, print(preds.shape, + target.shape) # BxJx3 + assert preds.dim() == 3 + # Expects BxJx3 + # valid_mask = torch.BoolTensor(target[:, :, 0].shape) + accel_gt = target[:-2] - 2 * target[1:-1] + target[2:] + accel_pred = preds[:-2] - 2 * preds[1:-1] + preds[2:] + normed = torch.linalg.norm(accel_pred - accel_gt, dim=-1) + accel_seq = normed.mean(1) + return accel_seq + + +def calc_pampjpe(preds, target, sample_wise=True, return_transform_mat=False): + # Expects BxJx3 + target, preds = target.float(), preds.float() + # extracting the keypoints that all samples have valid annotations + # valid_mask = (target[:, :, 0] != -2.).sum(0) == len(target) + # preds_tranformed, PA_transform = batch_compute_similarity_transform_torch(preds[:, valid_mask], target[:, valid_mask]) + # pa_mpjpe_each = compute_mpjpe(preds_tranformed, target[:, valid_mask], sample_wise=sample_wise) + + preds_tranformed, PA_transform = batch_compute_similarity_transform_torch( + preds, target) + pa_mpjpe_each = compute_mpjpe(preds_tranformed, + target, + sample_wise=sample_wise) + + if return_transform_mat: + return pa_mpjpe_each, PA_transform + else: + return pa_mpjpe_each + + +# from action2motion +def calculate_diversity_multimodality(activations, + labels, + num_labels, + diversity_times=200, + multimodality_times=20): + labels = labels.long() + num_motions = activations.shape[0] # len(labels) + + diversity = 0 + + first_indices = np.random.randint(0, num_motions, diversity_times) + second_indices = np.random.randint(0, num_motions, diversity_times) + for first_idx, second_idx in zip(first_indices, second_indices): + diversity += torch.dist(activations[first_idx, :], + activations[second_idx, :]) + diversity /= diversity_times + + multimodality = 0 + label_quotas = np.zeros(num_labels) + label_quotas[labels.unique( + )] = multimodality_times # if a label does not appear in batch, its quota remains zero + while np.any(label_quotas > 0): + # print(label_quotas) + first_idx = np.random.randint(0, num_motions) + first_label = labels[first_idx] + if not label_quotas[first_label]: + continue + + second_idx = np.random.randint(0, num_motions) + second_label = labels[second_idx] + while first_label != second_label: + second_idx = np.random.randint(0, num_motions) + second_label = labels[second_idx] + + label_quotas[first_label] -= 1 + + first_activation = activations[first_idx, :] + second_activation = activations[second_idx, :] + multimodality += torch.dist(first_activation, second_activation) + + multimodality /= (multimodality_times * num_labels) + + return diversity, multimodality + + +def calculate_fid(statistics_1, statistics_2): + return calculate_frechet_distance_np(statistics_1[0], statistics_1[1], + statistics_2[0], statistics_2[1]) + + +# from: https://github.com/abdulfatir/gan-metrics-pytorch/blob/master/kid_score.py +def polynomial_mmd_averages(codes_g, + codes_r, + n_subsets=50, + subset_size=1000, + ret_var=True, + output=sys.stdout, + **kernel_args): + m = min(codes_g.shape[0], codes_r.shape[0]) + mmds = np.zeros(n_subsets) + if ret_var: + vars = np.zeros(n_subsets) + choice = np.random.choice + + replace = subset_size < len(codes_g) + + for i in range(n_subsets): + g = codes_g[choice(len(codes_g), subset_size, replace=replace)] + r = codes_r[choice(len(codes_r), subset_size, replace=replace)] + o = polynomial_mmd(g, r, **kernel_args, var_at_m=m, ret_var=ret_var) + if ret_var: + mmds[i], vars[i] = o + else: + mmds[i] = o + + return (mmds, vars) if ret_var else mmds + + +def 
polynomial_mmd(codes_g, + codes_r, + degree=3, + gamma=None, + coef0=1, + var_at_m=None, + ret_var=True): + from sklearn.metrics.pairwise import polynomial_kernel + + # use k(x, y) = (gamma + coef0)^degree + # default gamma is 1 / dim + X = codes_g + Y = codes_r + + K_XX = polynomial_kernel(X, degree=degree, gamma=gamma, coef0=coef0) + K_YY = polynomial_kernel(Y, degree=degree, gamma=gamma, coef0=coef0) + K_XY = polynomial_kernel(X, Y, degree=degree, gamma=gamma, coef0=coef0) + + return _mmd2_and_variance(K_XX, + K_XY, + K_YY, + var_at_m=var_at_m, + ret_var=ret_var) + + +def _mmd2_and_variance(K_XX, + K_XY, + K_YY, + unit_diagonal=False, + mmd_est='unbiased', + block_size=1024, + var_at_m=None, + ret_var=True): + # based on + # https://github.com/dougalsutherland/opt-mmd/blob/master/two_sample/mmd.py + # but changed to not compute the full kernel matrix at once + m = K_XX.shape[0] + assert K_XX.shape == (m, m) + assert K_XY.shape == (m, m) + assert K_YY.shape == (m, m) + if var_at_m is None: + var_at_m = m + + # Get the various sums of kernels that we'll use + # Kts drop the diagonal, but we don't need to compute them explicitly + if unit_diagonal: + diag_X = diag_Y = 1 + sum_diag_X = sum_diag_Y = m + sum_diag2_X = sum_diag2_Y = m + else: + diag_X = np.diagonal(K_XX) + diag_Y = np.diagonal(K_YY) + + sum_diag_X = diag_X.sum() + sum_diag_Y = diag_Y.sum() + + sum_diag2_X = _sqn(diag_X) + sum_diag2_Y = _sqn(diag_Y) + + Kt_XX_sums = K_XX.sum(axis=1) - diag_X + Kt_YY_sums = K_YY.sum(axis=1) - diag_Y + K_XY_sums_0 = K_XY.sum(axis=0) + K_XY_sums_1 = K_XY.sum(axis=1) + + Kt_XX_sum = Kt_XX_sums.sum() + Kt_YY_sum = Kt_YY_sums.sum() + K_XY_sum = K_XY_sums_0.sum() + + if mmd_est == 'biased': + mmd2 = ((Kt_XX_sum + sum_diag_X) / (m * m) + (Kt_YY_sum + sum_diag_Y) / + (m * m) - 2 * K_XY_sum / (m * m)) + else: + assert mmd_est in {'unbiased', 'u-statistic'} + mmd2 = (Kt_XX_sum + Kt_YY_sum) / (m * (m - 1)) + if mmd_est == 'unbiased': + mmd2 -= 2 * K_XY_sum / (m * m) + else: + mmd2 -= 2 * (K_XY_sum - np.trace(K_XY)) / (m * (m - 1)) + + if not ret_var: + return mmd2 + + Kt_XX_2_sum = _sqn(K_XX) - sum_diag2_X + Kt_YY_2_sum = _sqn(K_YY) - sum_diag2_Y + K_XY_2_sum = _sqn(K_XY) + + dot_XX_XY = Kt_XX_sums.dot(K_XY_sums_1) + dot_YY_YX = Kt_YY_sums.dot(K_XY_sums_0) + + m1 = m - 1 + m2 = m - 2 + zeta1_est = ( + 1 / (m * m1 * m2) * + (_sqn(Kt_XX_sums) - Kt_XX_2_sum + _sqn(Kt_YY_sums) - Kt_YY_2_sum) - 1 / + (m * m1)**2 * (Kt_XX_sum**2 + Kt_YY_sum**2) + 1 / (m * m * m1) * + (_sqn(K_XY_sums_1) + _sqn(K_XY_sums_0) - 2 * K_XY_2_sum) - + 2 / m**4 * K_XY_sum**2 - 2 / (m * m * m1) * (dot_XX_XY + dot_YY_YX) + + 2 / (m**3 * m1) * (Kt_XX_sum + Kt_YY_sum) * K_XY_sum) + zeta2_est = (1 / (m * m1) * (Kt_XX_2_sum + Kt_YY_2_sum) - 1 / (m * m1)**2 * + (Kt_XX_sum**2 + Kt_YY_sum**2) + 2 / (m * m) * K_XY_2_sum - + 2 / m**4 * K_XY_sum**2 - 4 / (m * m * m1) * + (dot_XX_XY + dot_YY_YX) + 4 / (m**3 * m1) * + (Kt_XX_sum + Kt_YY_sum) * K_XY_sum) + var_est = (4 * (var_at_m - 2) / (var_at_m * (var_at_m - 1)) * zeta1_est + + 2 / (var_at_m * (var_at_m - 1)) * zeta2_est) + + return mmd2, var_est + + +def _sqn(arr): + flat = np.ravel(arr) + return flat.dot(flat) + + +def calculate_kid(real_activations, generated_activations): + kid_values = polynomial_mmd_averages(real_activations, + generated_activations, + n_subsets=100) + results = (kid_values[0].mean(), kid_values[0].std()) + return results diff --git a/Evaluator_272/mld/models/modeltype/__init__.py b/Evaluator_272/mld/models/modeltype/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Evaluator_272/mld/models/modeltype/base.py b/Evaluator_272/mld/models/modeltype/base.py new file mode 100644 index 0000000000000000000000000000000000000000..493119bf6bee4167f99455c45404adbc0e183e48 --- /dev/null +++ b/Evaluator_272/mld/models/modeltype/base.py @@ -0,0 +1,424 @@ +import os +from pathlib import Path +import numpy as np +import torch +from pytorch_lightning import LightningModule +# from mld.models.metrics import ComputeMetrics, MRMetrics, TM2TMetrics, TM2TMetrics_R256, MMMetrics, HUMANACTMetrics, UESTCMetrics, UncondMetrics, ComputeMetrics_body_hand, MRMetrics_body_hand, ACCMetrics, TMR_TM2TMetrics +from mld.models.metrics import TMR_TM2TMetrics +from os.path import join as pjoin +from collections import OrderedDict + + +class BaseModel(LightningModule): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.times = [] + + def __post_init__(self): + trainable, nontrainable = 0, 0 + for p in self.parameters(): + if p.requires_grad: + trainable += np.prod(p.size()) + else: + nontrainable += np.prod(p.size()) + + self.hparams.n_params_trainable = trainable + self.hparams.n_params_nontrainable = nontrainable + + def training_step(self, batch, batch_idx): + return self.allsplit_step("train", batch, batch_idx) + + def validation_step(self, batch, batch_idx): + return self.allsplit_step("val", batch, batch_idx) + + def test_step(self, batch, batch_idx): + if len(self.times) *self.cfg.TEST.BATCH_SIZE % (100) > 0 and len(self.times) > 0: + print(f"Average time per sample ({self.cfg.TEST.BATCH_SIZE*len(self.times)}): ", np.mean(self.times)/self.cfg.TEST.BATCH_SIZE) + return self.allsplit_step("test", batch, batch_idx) + + def predict_step(self, batch, batch_idx): + return self.forward(batch) + + def allsplit_epoch_end(self, split: str, outputs): + dico = {} + + if split in ["train", "val"]: + losses = self.losses[split] + loss_dict = losses.compute(split) + losses.reset() + dico.update({ + losses.loss2logname(loss, split): value.item() + for loss, value in loss_dict.items() if not torch.isnan(value) + }) + + if split in ["val", "test"]: + + if self.trainer.datamodule.is_mm and ("TM2TMetrics" in self.metrics_dict or "TM2TMetrics_R256" in self.metrics_dict): + metrics_dicts = ['MMMetrics'] + else: + metrics_dicts = self.metrics_dict + for metric in metrics_dicts: + metrics_dict = getattr( + self, + metric).compute(sanity_flag=self.trainer.sanity_checking) + # reset metrics + getattr(self, metric).reset() + dico.update({ + f"Metrics/{metric}": value.item() + for metric, value in metrics_dict.items() + }) + if split != "test": + dico.update({ + "epoch": float(self.trainer.current_epoch), + "step": float(self.trainer.current_epoch), + }) + # don't write sanity check into log + if not self.trainer.sanity_checking: + self.log_dict(dico, sync_dist=True, rank_zero_only=True) + + def training_epoch_end(self, outputs): + return self.allsplit_epoch_end("train", outputs) + + def validation_epoch_end(self, outputs): + # # ToDo + # # re-write vislization checkpoint? 
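+        # # Note on the flow: allsplit_epoch_end above calls compute() on each
+        # # torchmetrics Metric (state is accumulated via update() during the
+        # # epoch) and then reset() to clear it for the next epoch. The
+        # # viz_epoch call sketched below is not wired up in this snapshot;
+        # # `dataset`, `parameters` and `val_set` are placeholder names.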
+ # # visualize validation + # parameters = {"xx",xx} + # vis_path = viz_epoch(self, dataset, epoch, parameters, module=None, + # folder=parameters["folder"], writer=None, exps=f"_{dataset_val.dataset_name}_"+val_set) + return self.allsplit_epoch_end("val", outputs) + + def test_epoch_end(self, outputs): + self.save_npy(outputs) + self.cfg.TEST.REP_I = self.cfg.TEST.REP_I + 1 + + return self.allsplit_epoch_end("test", outputs) + + def on_save_checkpoint(self, checkpoint): + # don't save clip to checkpoint + state_dict = checkpoint['state_dict'] + clip_k = [] + for k, v in state_dict.items(): + if 'text_encoder' in k: + clip_k.append(k) + for k in clip_k: + del checkpoint['state_dict'][k] + + def on_load_checkpoint(self, checkpoint): + # restore clip state_dict to checkpoint + clip_state_dict = self.text_encoder.state_dict() + new_state_dict = OrderedDict() + for k, v in clip_state_dict.items(): + new_state_dict['text_encoder.' + k] = v + for k, v in checkpoint['state_dict'].items(): + if 'text_encoder' not in k: + new_state_dict[k] = v + checkpoint['state_dict'] = new_state_dict + + def load_state_dict(self, state_dict, strict=True): + # load clip state_dict to checkpoint + if hasattr(self, 'text_encoder'): + clip_state_dict = self.text_encoder.state_dict() + new_state_dict = OrderedDict() + for k, v in clip_state_dict.items(): + new_state_dict['text_encoder.' + k] = v + for k, v in state_dict.items(): + if 'text_encoder' not in k: + new_state_dict[k] = v + else: + new_state_dict = state_dict + + super().load_state_dict(new_state_dict, strict) + + def configure_optimizers(self): + return {"optimizer": self.optimizer} + + def configure_metrics(self): + for metric in self.metrics_dict: + if metric == "TemosMetric": + self.TemosMetric = ComputeMetrics( + njoints=self.njoints, + jointstype=self.cfg.DATASET.JOINT_TYPE, + dist_sync_on_step=self.cfg.METRIC.DIST_SYNC_ON_STEP, + ) + + elif metric == "TemosMetric_body_hand": + self.TemosMetric_body_hand = ComputeMetrics_body_hand( + njoints=self.njoints, + jointstype=self.cfg.DATASET.JOINT_TYPE, + dist_sync_on_step=self.cfg.METRIC.DIST_SYNC_ON_STEP, + ) + + elif metric == "TM2TMetrics": + self.TM2TMetrics = TM2TMetrics( + diversity_times=30 + if self.debug else self.cfg.TEST.DIVERSITY_TIMES, + dist_sync_on_step=self.cfg.METRIC.DIST_SYNC_ON_STEP, + ) + elif metric == 'TM2TMetrics_R256': + self.TM2TMetrics_R256 = TM2TMetrics_R256( + diversity_times=30 + if self.debug else self.cfg.TEST.DIVERSITY_TIMES, + dist_sync_on_step=self.cfg.METRIC.DIST_SYNC_ON_STEP, + ) + elif metric == 'TMR_TM2TMetrics': + self.TMR_TM2TMetrics = TMR_TM2TMetrics( + diversity_times=30 + if self.debug else self.cfg.TEST.DIVERSITY_TIMES, + dist_sync_on_step=self.cfg.METRIC.DIST_SYNC_ON_STEP, + ) + elif metric == "MRMetrics": + self.MRMetrics = MRMetrics( + njoints=self.njoints, + jointstype=self.cfg.DATASET.JOINT_TYPE, + dist_sync_on_step=self.cfg.METRIC.DIST_SYNC_ON_STEP, + ) + + elif metric == "MRMetrics_body_hand": + self.MRMetrics_body_hand = MRMetrics_body_hand( + njoints=self.njoints, + jointstype=self.cfg.DATASET.JOINT_TYPE, + dist_sync_on_step=self.cfg.METRIC.DIST_SYNC_ON_STEP, + ) + + elif metric == "HUMANACTMetrics": + self.HUMANACTMetrics = HUMANACTMetrics( + datapath=os.path.join(self.cfg.model.humanact12_rec_path, + "humanact12_gru.tar"), + diversity_times=30 + if self.debug else self.cfg.TEST.DIVERSITY_TIMES, + multimodality_times=self.cfg.TEST.MM_NUM_TIMES, + dist_sync_on_step=self.cfg.METRIC.DIST_SYNC_ON_STEP, + ) + elif metric == "UESTCMetrics": + 
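+            # Note: this snapshot only imports TMR_TM2TMetrics in base.py, so
+            # selecting UESTCMetrics (or any of the other commented-out metric
+            # types in this chain) would raise a NameError unless their imports
+            # are restored from mld.models.metrics.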
self.UESTCMetrics = UESTCMetrics( + cfg=self.cfg, + diversity_times=30 + if self.debug else self.cfg.TEST.DIVERSITY_TIMES, + multimodality_times=self.cfg.TEST.MM_NUM_TIMES, + dist_sync_on_step=self.cfg.METRIC.DIST_SYNC_ON_STEP, + ) + elif metric == "UncondMetrics": + self.UncondMetrics = UncondMetrics( + diversity_times=30 + if self.debug else self.cfg.TEST.DIVERSITY_TIMES, + dist_sync_on_step=self.cfg.METRIC.DIST_SYNC_ON_STEP, + ) + elif metric == "ACCMetrics": + self.ACCMetrics = ACCMetrics(dist_sync_on_step=self.cfg.METRIC.DIST_SYNC_ON_STEP) + else: + raise NotImplementedError( + f"Do not support Metric Type {metric}") + if "TM2TMetrics" in self.metrics_dict or "UncondMetrics" in self.metrics_dict or "TM2TMetrics_R256" in self.metrics_dict: + self.MMMetrics = MMMetrics( + mm_num_times=self.cfg.TEST.MM_NUM_TIMES, + dist_sync_on_step=self.cfg.METRIC.DIST_SYNC_ON_STEP, + ) + + def save_npy(self, outputs): + + cfg = self.cfg + output_dir = Path( + os.path.join( + cfg.FOLDER, + str(cfg.model.model_type), + str(cfg.NAME), + "samples", + )) + + if cfg.TEST.SAVE_PREDICTIONS and cfg.TEST.REP_I + 1 == cfg.TEST.REPLICATION_TIMES: + if cfg.TEST.inference_vq_code: + if self.vae_type in ["hvq", "hvq_body_hand"]: + name = [i[2] for i in outputs] + motion_code_t = [i[0] for i in outputs] + motion_code_b = [i[1] for i in outputs] + else: + name = [i[1] for i in outputs] + outputs = [i[0] for i in outputs] + + else: + if cfg.DATASET.MOTION_TYPE == 'vector_263': + lengths = [i[1] for i in outputs] + texts = [i[2] for i in outputs] + outputs = [i[0] for i in outputs] + elif cfg.DATASET.MOTION_TYPE == 'smplx_212': + if cfg.TRAIN.use_joints: + lengths = [i[1] for i in outputs] + gen_motions = [self.datamodule.renormt2m_back(i[0]) for i in outputs] + ref_motions = [self.datamodule.renormt2m_back(i[2]) for i in outputs] + else: + return + elif cfg.DATASET.MOTION_TYPE in ['ric_rot']: + lengths = [i[1] for i in outputs] + gen_motions = [i[0] for i in outputs] + ref_motions = [i[2] for i in outputs] + else: + raise NotImplementedError + + if cfg.TEST.DATASETS[0].lower() in ["humanml3d", "kit"]: + if cfg.TEST.inference_vq_code: + for i in range(len(outputs)): + if self.vae_type in ["hvq", "hvq_body_hand"]: + for bid in range( + min(cfg.TEST.BATCH_SIZE, motion_code_t[i].shape[0])): + + motion_vqcode_t = motion_code_t[i][bid].cpu().numpy()[None, :] + motion_vqcode_b = motion_code_b[i][bid].cpu().numpy()[None, :] + motion_name = name[i][bid] + + assert cfg.TEST.REPLICATION_TIMES == 1 + + motion_name = f"{motion_name}.npy" + output_dir_t = Path( + os.path.join(f'./datasets/{cfg.TEST.DATASETS[0]}/vq_tokens', str(cfg.model.vae_type), 'motion_vqcode_t')) + output_dir_b = Path( + os.path.join(f'./datasets/{cfg.TEST.DATASETS[0]}/vq_tokens', str(cfg.model.vae_type), 'motion_vqcode_b')) + # save predictions results + npypath_t = output_dir_t / motion_name + npypath_b = output_dir_b / motion_name + + np.save(npypath_t, motion_vqcode_t) + np.save(npypath_b, motion_vqcode_b) + + + + else: + for bid in range( + min(cfg.TEST.BATCH_SIZE, outputs[i].shape[0])): + motion_vqcode = outputs[i][bid].cpu().numpy()[None, :] + motion_name = name[i][bid] + + assert cfg.TEST.REPLICATION_TIMES == 1 + + motion_name = f"{motion_name}.npy" + output_dir = Path( + os.path.join(f'./datasets/{cfg.TEST.DATASETS[0]}/vq_tokens', str(cfg.model.vae_type))) + # save predictions results + npypath = output_dir / motion_name + np.save(npypath, motion_vqcode) + + + else: + keyids = self.trainer.datamodule.test_dataset.name_list + for i in range(len(outputs)): 
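+                # Each element of `outputs` holds one test batch; the sample id
+                # is recovered as name_list[i * BATCH_SIZE + bid], and the
+                # generated joints plus the caption are saved per sample below.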
+ for bid in range( + min(cfg.TEST.BATCH_SIZE, outputs[i].shape[0])): + keyid = keyids[i * cfg.TEST.BATCH_SIZE + bid] + gen_joints = outputs[i][bid].cpu().numpy() + text = texts[i][bid] + + if cfg.TEST.REPLICATION_TIMES > 1: + name = f"{keyid}_{cfg.TEST.REP_I}" + else: + name = f"{keyid}.npy" + # save predictions results + npypath = output_dir / name + np.save(npypath, gen_joints) + + textpath = output_dir / 'text' / (name + '.txt') + os.makedirs(os.path.split(textpath)[0], exist_ok=True) + with open(textpath, "w") as f: + f.write(text) + elif cfg.TEST.DATASETS[0].lower() in ["humanact12", "uestc"]: + keyids = range(len(self.trainer.datamodule.test_dataset)) + for i in range(len(outputs)): + for bid in range( + min(cfg.TEST.BATCH_SIZE, outputs[i].shape[0])): + keyid = keyids[i * cfg.TEST.BATCH_SIZE + bid] + gen_joints = outputs[i][bid].cpu() + gen_joints = gen_joints.permute(2, 0, + 1)[:lengths[i][bid], + ...].numpy() + if cfg.TEST.REPLICATION_TIMES > 1: + name = f"{keyid}_{cfg.TEST.REP_I}" + else: + name = f"{keyid}.npy" + # save predictions results + npypath = output_dir / name + np.save(npypath, gen_joints) + elif cfg.TEST.DATASETS[0].lower() in ["motionx", 'motionx_v26']: + + + if cfg.TEST.inference_vq_code: + for i in range(len(outputs)): + if self.vae_type in ["hvq", "hvq_body_hand"]: + for bid in range( + min(cfg.TEST.BATCH_SIZE, motion_code_t[i].shape[0])): + motion_vqcode_t = motion_code_t[i][bid].cpu().numpy()[None, :] + motion_vqcode_b = motion_code_b[i][bid].cpu().numpy()[None, :] + motion_name = name[i][bid] + + assert cfg.TEST.REPLICATION_TIMES == 1 + + motion_name = f"{motion_name}.npy" + if cfg.TEST.DATASETS[0].lower() == 'motionx_v26': + output_dir_t = Path( + os.path.join(f'./datasets/Motion-X-V26/vq_tokens', str(cfg.model.vae_type), 'motion_vqcode_t')) + output_dir_b = Path( + os.path.join(f'./datasets/Motion-X-V26/vq_tokens', str(cfg.model.vae_type), 'motion_vqcode_b')) + elif cfg.TEST.DATASETS[0].lower() == 'motionx': + output_dir_t = Path( + os.path.join(f'./datasets/Motion-X/vq_tokens', str(cfg.model.vae_type), 'motion_vqcode_t')) + output_dir_b = Path( + os.path.join(f'./datasets/Motion-X/vq_tokens', str(cfg.model.vae_type), 'motion_vqcode_b')) + else: + raise NotImplementedError + # save predictions results + + npypath_t = output_dir_t / motion_name + npypath_b = output_dir_b / motion_name + + npypath_t_ref_parent_directory = os.path.dirname(npypath_t) + if not os.path.exists(npypath_t_ref_parent_directory): + os.makedirs(npypath_t_ref_parent_directory) + + npypath_b_parent_directory = os.path.dirname(npypath_b) + if not os.path.exists(npypath_b_parent_directory): + os.makedirs(npypath_b_parent_directory) + + np.save(npypath_t, motion_vqcode_t) + np.save(npypath_b, motion_vqcode_b) + + + + + else: + for bid in range( + min(cfg.TEST.BATCH_SIZE, outputs[i].shape[0])): + motion_vqcode = outputs[i][bid].cpu().numpy()[None, :] + motion_name = name[i][bid] + + assert cfg.TEST.REPLICATION_TIMES == 1 + + motion_name = f"{motion_name}.npy" + output_dir = Path( + os.path.join(f'./datasets/Motion-X/vq_tokens', str(cfg.model.vae_type))) + # save predictions results + + npypath = output_dir / motion_name + npypath_parent_directory = os.path.dirname(npypath) + if not os.path.exists(npypath_parent_directory): + os.makedirs(npypath_parent_directory) + np.save(npypath, motion_vqcode) + + + + else: + + keyids = self.trainer.datamodule.test_dataset.name_list + for i in range(len(gen_motions)): + for bid in range( + min(cfg.TEST.BATCH_SIZE, gen_motions[i].shape[0])): + keyid = 
keyids[i * cfg.TEST.BATCH_SIZE + bid] + gen_joints = gen_motions[i][bid].cpu().numpy() + ref_joints = ref_motions[i][bid].cpu().numpy() + + gen_name = f"{keyid}.npy" + ref_name = f"{keyid}_gt.npy" + # save predictions results + npypath = output_dir / gen_name + os.makedirs(os.path.split(npypath)[0], exist_ok=True) + np.save(npypath, gen_joints) + + diff --git a/Evaluator_272/mld/models/modeltype/mld.py b/Evaluator_272/mld/models/modeltype/mld.py new file mode 100644 index 0000000000000000000000000000000000000000..2bc1d99837201a4271b74d47696f2262fd9cd14e --- /dev/null +++ b/Evaluator_272/mld/models/modeltype/mld.py @@ -0,0 +1,3001 @@ +import inspect +import os +from mld.transforms.rotation2xyz import Rotation2xyz +import numpy as np +import torch +from torch import Tensor +from torch.optim import AdamW +from torchmetrics import MetricCollection +import time +from mld.config import instantiate_from_config +from os.path import join as pjoin +from mld.models.architectures import ( + mld_denoiser, + mld_dual_vae, + mld_vae, + vposert_vae, + t2m_motionenc, + t2m_textenc, + vposert_vae, +) +from mld.models.losses.mld import MLDLosses, MLDLosses_no_joint +from mld.models.losses.vqvae import VQVAELosses +from mld.models.modeltype.base import BaseModel +from mld.utils.temos_utils import remove_padding + +from mld.models.architectures.temos.textencoder.distillbert_actor import DistilbertActorAgnosticEncoder +from mld.models.architectures.temos.motionencoder.actor import ActorAgnosticEncoder + +from .base import BaseModel +from .smplx_layer import smplx_layer + +from ..body_skeleton.skeleton import Skeleton +from ..body_skeleton.paramUtil import * + +from collections import OrderedDict +from sentence_transformers import SentenceTransformer + +import copy + + +class MLD(BaseModel): + """ + Stage 1 vae + Stage 2 diffusion + """ + + def __init__(self, cfg, datamodule, **kwargs): + super().__init__() + + self.cfg = cfg + self.stage = cfg.TRAIN.STAGE + self.condition = cfg.model.condition + self.is_vae = cfg.model.vae + self.predict_epsilon = cfg.TRAIN.ABLATION.PREDICT_EPSILON + self.nfeats = cfg.DATASET.NFEATS + self.njoints = cfg.DATASET.NJOINTS + self.debug = cfg.DEBUG + self.latent_dim = cfg.model.latent_dim + self.guidance_scale = cfg.model.guidance_scale + self.guidance_uncodp = cfg.model.guidance_uncondp + self.datamodule = datamodule + + if 'MINOR_MOTION_TYPE' in cfg.DATASET: + self.input_format = cfg.DATASET.MINOR_MOTION_TYPE + else: + self.input_format = cfg.DATASET.MOTION_TYPE + + self.motion_type = cfg.DATASET.MOTION_TYPE + + self.eval_on_text = cfg.EVAL.eval_on_text + # + try: + self.vae_type = cfg.model.vae_type + except: + self.vae_type = cfg.model.motion_vae.target.split( + ".")[-1].lower().replace("vae", "") + + self.text_encoder = instantiate_from_config(cfg.model.text_encoder) + + self.smplx_model = smplx_layer() + + self.smplx_model.eval() + for p in self.smplx_model.parameters(): + p.requires_grad = False + + if self.vae_type != "no": + # + self.vae = instantiate_from_config(cfg.model.motion_vae) + + # Don't train the motion encoder and decoder + if self.stage == "diffusion": + if self.vae_type in ["mld", "vposert", "actor", "humanvq"]: + self.vae.training = False + for p in self.vae.parameters(): + p.requires_grad = False + elif self.vae_type == "no": + pass + else: + self.motion_encoder.training = False + for p in self.motion_encoder.parameters(): + p.requires_grad = False + self.motion_decoder.training = False + for p in self.motion_decoder.parameters(): + p.requires_grad = False + 
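+        # The first-stage components are frozen above so the diffusion stage
+        # only optimizes the denoiser instantiated below. A minimal sketch of
+        # the same freeze pattern (illustrative names, not this repo's API):
+        #     for p in first_stage.parameters():
+        #         p.requires_grad = False
+        #     trainable = [p for p in model.parameters() if p.requires_grad]
+        #     optimizer = torch.optim.AdamW(trainable, lr=1e-4)
+        # This module instead passes all parameters to AdamW; frozen ones
+        # simply receive no gradient updates.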
+ self.denoiser = instantiate_from_config(cfg.model.denoiser) + if not self.predict_epsilon: + cfg.model.scheduler.params['prediction_type'] = 'sample' + cfg.model.noise_scheduler.params['prediction_type'] = 'sample' + self.scheduler = instantiate_from_config(cfg.model.scheduler) + self.noise_scheduler = instantiate_from_config( + cfg.model.noise_scheduler) + + if cfg.EVAL.eval_on_text: + if self.condition in ["text", "text_uncond", 'text_all', 'text_face', 'text_body', 'text_hand', 'text_face_body', 'text_seperate', 'only_pose_concat', 'only_pose_fusion']: + self._get_t2m_evaluator(cfg) + + if cfg.EVAL.use_tmr_eval: + if self.condition in ["text", "text_uncond", 'text_all', 'text_face', 'text_body', 'text_hand', 'text_face_body', 'text_seperate', 'only_pose_concat', 'only_pose_fusion']: + self._get_tmr_t2m_evaluator(cfg) + + if cfg.TRAIN.OPTIM.TYPE.lower() == "adamw": + self.optimizer = AdamW(lr=cfg.TRAIN.OPTIM.LR, + params=self.parameters()) + else: + raise NotImplementedError( + "Do not support other optimizer for now.") + + if cfg.LOSS.TYPE == "mld": + # assert cfg.DATASET.MOTION_TYPE in ['vector_263', 'root_position'] + self._losses = MetricCollection({ + split: MLDLosses(vae=self.is_vae, mode="xyz", cfg=cfg) + for split in ["losses_train", "losses_test", "losses_val"] + }) + + elif cfg.LOSS.TYPE == "vqvae": + + self._losses = MetricCollection({ + split: VQVAELosses(vae=self.is_vae, mode="xyz", cfg=cfg) + for split in ["losses_train", "losses_test", "losses_val"] + }) + + elif cfg.LOSS.TYPE == 'mld_no_joint': + # assert 'smpl' not in cfg.DATASET.MOTION_TYPE + self._losses = MetricCollection({ + split: MLDLosses_no_joint(vae=self.is_vae, mode="xyz", cfg=cfg) + for split in ["losses_train", "losses_test", "losses_val"] + }) + + else: + raise NotImplementedError( + "MotionCross model only supports mld losses.") + + # if cfg.LOSS.TYPE == 'mld_no_joint': + # assert cfg.TRAIN.use_joints == False + + self.losses = { + key: self._losses["losses_" + key] + for key in ["train", "test", "val"] + } + + self.metrics_dict = cfg.METRIC.TYPE + self.configure_metrics() + + # If we want to overide it at testing time + + if eval("self.cfg.TRAIN.DATASETS")[0].lower() == 'humanml3d': + n_raw_offsets = torch.from_numpy(t2m_raw_offsets) + kinematic_chain = t2m_kinematic_chain + elif eval("self.cfg.TRAIN.DATASETS")[0].lower() == 'kit': + n_raw_offsets = torch.from_numpy(kit_raw_offsets) + kinematic_chain = kit_kinematic_chain + elif eval("self.cfg.TRAIN.DATASETS")[0].lower() in ['motionx', 'motionx_v25', 'motionx_v26']: + n_raw_offsets = torch.from_numpy(t2m_raw_body_hand_offsets) + body_raw_offsets = n_raw_offsets[:22] + hand_raw_offsets = n_raw_offsets[22:] + kinematic_chain = t2m_body_hand_kinematic_chain + body_kinemantic_chain = t2m_kinematic_chain + hand_kinemantic_chain = t2m_left_hand_chain + t2m_right_hand_chain + else: + raise NotImplementedError + + + self.skel=None + if self.input_format in ['root_rot6d']: + example_data = np.load(os.path.join('./HumanML3D-1/joints', '000021' + '.npy')) + example_data = example_data.reshape(len(example_data), -1, 3) + example_data = torch.from_numpy(example_data) + tgt_skel = Skeleton(n_raw_offsets, kinematic_chain) + # (joints_num, 3) + tgt_offsets = tgt_skel.get_offsets_joints(example_data[0]) + self.skel = Skeleton(n_raw_offsets, kinematic_chain) + self.skel.set_offset(tgt_offsets) + + elif self.input_format in ['root_body_pos_vel_hand_rot']: + + example_data = np.load('./datasets/Motion-X/motion_data/joint/humanml/000021.npy') + example_data = 
example_data.reshape(len(example_data), -1, 3) + example_data = torch.from_numpy(example_data) + + example_data = example_data[:, :52] + + body_example_data = example_data[:, :22] + tgt_body_skel = Skeleton(body_raw_offsets, body_kinemantic_chain) + + tgt_skel = Skeleton(n_raw_offsets, kinematic_chain) + + # (joints_num, 3) + tgt_body_skel_offsets = tgt_body_skel.get_offsets_joints(body_example_data[0]) + tgt_skel_offsets = tgt_skel.get_offsets_joints(example_data[0]) + + body_skel = Skeleton(body_raw_offsets, body_kinemantic_chain) + all_skel = Skeleton(n_raw_offsets, kinematic_chain) + + body_skel.set_offset(tgt_body_skel_offsets) + all_skel.set_offset(tgt_skel_offsets) + + self.skel = (body_skel, all_skel) + # self.skel.set_offset(tgt_offsets) + + + + + self.sample_mean = False + self.fact = None + self.do_classifier_free_guidance = self.guidance_scale > 1.0 + if self.condition in ['text', 'text_uncond', "text_all", 'text_body', 'text_hand', 'text_face_body', 'text_face', "text_seperate", "only_pose_concat", "only_pose_fusion"]: + self.feats2joints = datamodule.feats2joints + self.renorm2ori = datamodule.renorm2ori + if self.cfg.model.vae_type == 'hvq_body_hand_face': + self.facerenorm2ori = datamodule.facerenorm2ori + elif self.condition == 'action': + self.rot2xyz = Rotation2xyz(smpl_path=cfg.DATASET.SMPL_PATH) + self.feats2joints_eval = lambda sample, mask: self.rot2xyz( + sample.view(*sample.shape[:-1], 6, 25).permute(0, 3, 2, 1), + mask=mask, + pose_rep='rot6d', + glob=True, + translation=True, + jointstype='smpl', + vertstrans=True, + betas=None, + beta=0, + glob_rot=None, + get_rotations_back=False) + self.feats2joints = lambda sample, mask: self.rot2xyz( + sample.view(*sample.shape[:-1], 6, 25).permute(0, 3, 2, 1), + mask=mask, + pose_rep='rot6d', + glob=True, + translation=True, + jointstype='vertices', + vertstrans=False, + betas=None, + beta=0, + glob_rot=None, + get_rotations_back=False) + + def _get_t2m_evaluator(self, cfg): + """ + load T2M text encoder and motion encoder for evaluating + """ + + + # init module + if cfg.model.eval_text_source == 'token': + + self.t2m_textencoder = t2m_textenc.TextEncoderBiGRUCo(word_size=cfg.model.t2m_textencoder.dim_word, + pos_size=cfg.model.t2m_textencoder.dim_pos_ohot, + hidden_size=cfg.model.t2m_textencoder.dim_text_hidden, + output_size=cfg.model.t2m_textencoder.dim_coemb_hidden, + ) + elif cfg.model.eval_text_source == 'only_text_token': + + self.t2m_textencoder = t2m_textenc.TextEncoderBiGRUCoV2(word_size=cfg.model.t2m_textencoder.dim_word, + hidden_size=cfg.model.t2m_textencoder.dim_text_hidden, + output_size=cfg.model.t2m_textencoder.dim_coemb_hidden, + ) + + elif cfg.model.eval_text_source in ['caption']: + + + if cfg.model.eval_text_encode_way == 'clip': + self.t2m_textencoder, clip_preprocess = clip.load("ViT-B/32", device=opt.device, jit=False) # Must set jit=False for training + clip.model.convert_weights(text_enc)# Actually this line is unnecessary since clip by default already on float16 + self.t2m_textencoder.eval() + for p in text_enc.parameters(): + p.requires_grad = False + + elif cfg.model.eval_text_encode_way == 't5': + os.environ["TOKENIZERS_PARALLELISM"] = "false" + self.t2m_textencoder = SentenceTransformer('sentence-transformers/sentence-t5-xl').to(opt.device) + self.t2m_textencoder.eval() + for p in self.t2m_textencoder.parameters(): + p.requires_grad = False + + elif 'GRU' in cfg.model.eval_text_encode_way: + self.t2m_textencoder = t2m_textenc.TextEncoderBiGRUCoV2(word_size=cfg.model.t2m_textencoder.dim_word, 
+ hidden_size=cfg.model.t2m_textencoder.dim_text_hidden, + output_size=cfg.model.t2m_textencoder.dim_coemb_hidden, + ) + else: + raise NotImplementedError + + + + if cfg.DATASET.MOTION_TYPE in ['vector_263', 'ric_rot', 'vector_263_ori_humanml']: + self.t2m_moveencoder = t2m_motionenc.MovementConvEncoder( + input_size=cfg.DATASET.NFEATS - 4, + hidden_size=cfg.model.t2m_motionencoder.dim_move_hidden, + output_size=cfg.model.t2m_motionencoder.dim_move_latent, + ) + elif cfg.DATASET.MOTION_TYPE in ['smplx_212', 'smplx_159']: + self.t2m_moveencoder = t2m_motionenc.MovementConvEncoder( + input_size=cfg.DATASET.NFEATS, + hidden_size=cfg.model.t2m_motionencoder.dim_move_hidden, + output_size=cfg.model.t2m_motionencoder.dim_move_latent, + ) + + else: + raise NotImplementedError + + self.t2m_motionencoder = t2m_motionenc.MotionEncoderBiGRUCo( + input_size=cfg.model.t2m_motionencoder.dim_move_latent, + hidden_size=cfg.model.t2m_motionencoder.dim_motion_hidden, + output_size=cfg.model.t2m_motionencoder.dim_motion_latent, + ) + + # load pretrianed + dataname = cfg.TEST.DATASETS[0] + dataname = "t2m" if dataname == "humanml3d" else dataname + # t2m_checkpoint = torch.load( + # os.path.join(cfg.model.t2m_path, dataname, cfg.DATASET.VERSION, cfg.DATASET.MOTION_TYPE, + # "text_mot_match_glove_6B_caption_bs_256/model/finest.tar")) + + + minor_motin_type = cfg.DATASET.MINOR_MOTION_TYPE if 'MINOR_MOTION_TYPE' in cfg.DATASET else '' + + + if dataname in ['motionx', 'motionx_v25', 'motionx_v26']: + + if 'TEXT_TYPE' in cfg.DATASET: + if cfg.DATASET.TEXT_TYPE == 'vicuna1.5_13b': + + t2m_checkpoint = torch.load( + os.path.join(cfg.model.t2m_path, dataname, cfg.DATASET.VERSION, cfg.DATASET.MOTION_TYPE, minor_motin_type, + "text_mot_match_glove_6B_caption_bs_256_text_vicuna1.5/model/finest.tar"), map_location=torch.device('cpu')) + elif cfg.DATASET.TEXT_TYPE == 'vicuna1.5_13b_add_subject': + t2m_checkpoint = torch.load( + os.path.join(cfg.model.t2m_path, dataname, cfg.DATASET.VERSION, cfg.DATASET.MOTION_TYPE, minor_motin_type, + "text_mot_match_glove_6B_caption_bs_256_text_vicuna1.5_add_subject/model/finest.tar"), map_location=torch.device('cpu')) + + else: + t2m_checkpoint = torch.load( + os.path.join(cfg.model.t2m_path, dataname, cfg.DATASET.VERSION, cfg.DATASET.MOTION_TYPE, minor_motin_type, + "text_mot_match_glove_6B_caption_bs_256/model/finest.tar"), map_location=torch.device('cpu')) + else: + t2m_checkpoint = torch.load( + os.path.join(cfg.model.t2m_path, dataname, + "text_mot_match/model/finest.tar"), map_location=torch.device('cpu')) + + self.t2m_textencoder.load_state_dict(t2m_checkpoint["text_encoder"]) + + self.t2m_moveencoder.load_state_dict( + t2m_checkpoint["movement_encoder"]) + + + self.t2m_motionencoder.load_state_dict( + t2m_checkpoint["motion_encoder"]) + + # freeze params + self.t2m_textencoder.eval() + self.t2m_moveencoder.eval() + self.t2m_motionencoder.eval() + for p in self.t2m_textencoder.parameters(): + p.requires_grad = False + for p in self.t2m_moveencoder.parameters(): + p.requires_grad = False + for p in self.t2m_motionencoder.parameters(): + p.requires_grad = False + + + def _get_tmr_t2m_evaluator(self, cfg): + """ + load tmr T2M text encoder and motion encoder for evaluating + """ + + assert cfg.model.eval_text_source in ['caption'] + + self.t2m_TMR_textencoder_eval = DistilbertActorAgnosticEncoder('distilbert-base-uncased', num_layers=4) + self.t2m_TMR_motionencoder_eval = ActorAgnosticEncoder(nfeats=cfg.DATASET.NFEATS, vae =True, num_layers=4) + + + # load pretrianed + dataname = 
cfg.TEST.DATASETS[0] + dataname = "t2m" if dataname == "humanml3d" else dataname + # t2m_checkpoint = torch.load( + # os.path.join(cfg.model.t2m_path, dataname, cfg.DATASET.VERSION, cfg.DATASET.MOTION_TYPE, + # "text_mot_match_glove_6B_caption_bs_256/model/finest.tar")) + + + minor_motin_type = cfg.DATASET.MINOR_MOTION_TYPE if 'MINOR_MOTION_TYPE' in cfg.DATASET else '' + if dataname in ['motionx', 'motionx_v25', 'motionx_v26']: + t2m_checkpoint = torch.load( + os.path.join(cfg.model.t2m_path, dataname, cfg.DATASET.VERSION, cfg.DATASET.MOTION_TYPE, minor_motin_type, "TMR_pretrain_new/epoch=59.ckpt"), map_location=torch.device('cpu')) + state_dict = t2m_checkpoint["state_dict"] + else: + t2m_checkpoint = torch.load( + os.path.join(cfg.model.t2m_path, dataname, + "text_mot_match/model/finest.tar"), map_location=torch.device('cpu')) + + tmr_textencoder_dict = OrderedDict() + for k, v in state_dict.items(): + # print(k) + if k.split(".")[0] == "textencoder": + name = k.replace("textencoder.", "") + tmr_textencoder_dict[name] = v + + self.t2m_TMR_textencoder_eval.load_state_dict(tmr_textencoder_dict, strict=True) + + + tmr_motionencoder_dict = OrderedDict() + for k, v in state_dict.items(): + # print(k) + if k.split(".")[0] == "motionencoder": + name = k.replace("motionencoder.", "") + tmr_motionencoder_dict[name] = v + + self.t2m_TMR_motionencoder_eval.load_state_dict(tmr_motionencoder_dict, strict=True) + + # freeze params + self.t2m_TMR_textencoder_eval.freeze() + self.t2m_TMR_motionencoder_eval.freeze() + self.t2m_TMR_textencoder_eval.eval() + self.t2m_TMR_motionencoder_eval.eval() + for p in self.t2m_TMR_textencoder_eval.parameters(): + p.requires_grad = False + for p in self.t2m_TMR_motionencoder_eval.parameters(): + p.requires_grad = False + + def sample_from_distribution( + self, + dist, + *, + fact=None, + sample_mean=False, + ) -> Tensor: + fact = fact if fact is not None else self.fact + sample_mean = sample_mean if sample_mean is not None else self.sample_mean + + if sample_mean: + return dist.loc.unsqueeze(0) + + # Reparameterization trick + if fact is None: + return dist.rsample().unsqueeze(0) + + # Resclale the eps + eps = dist.rsample() - dist.loc + z = dist.loc + fact * eps + + # add latent size + z = z.unsqueeze(0) + return z + + def forward(self, batch): + texts = batch["text"] + lengths = batch["length"] + if self.cfg.TEST.COUNT_TIME: + self.starttime = time.time() + + if self.stage in ['diffusion', 'vae_diffusion']: + # diffusion reverse + if self.do_classifier_free_guidance: + uncond_tokens = [""] * len(texts) + if self.condition == 'text': + uncond_tokens.extend(texts) + elif self.condition == 'text_uncond': + uncond_tokens.extend(uncond_tokens) + texts = uncond_tokens + text_emb = self.text_encoder(texts) + z = self._diffusion_reverse(text_emb, lengths) + elif self.stage in ['vae']: + motions = batch['motion'] + z, dist_m = self.vae.encode(motions, lengths) + + with torch.no_grad(): + # ToDo change mcross actor to same api + if self.vae_type in ["mld","actor"]: + feats_rst = self.vae.decode(z, lengths) + elif self.vae_type == "no": + feats_rst = z.permute(1, 0, 2) + + if self.cfg.TEST.COUNT_TIME: + self.endtime = time.time() + elapsed = self.endtime - self.starttime + self.times.append(elapsed) + if len(self.times) % 100 == 0: + meantime = np.mean( + self.times[-100:]) / self.cfg.TEST.BATCH_SIZE + print( + f'100 iter mean Time (batch_size: {self.cfg.TEST.BATCH_SIZE}): {meantime}', + ) + if len(self.times) % 1000 == 0: + meantime = np.mean( + self.times[-1000:]) / 
self.cfg.TEST.BATCH_SIZE + print( + f'1000 iter mean Time (batch_size: {self.cfg.TEST.BATCH_SIZE}): {meantime}', + ) + with open(pjoin(self.cfg.FOLDER_EXP, 'times.txt'), 'w') as f: + for line in self.times: + f.write(str(line)) + f.write('\n') + joints = self.feats2joints(feats_rst.detach().cpu()) + return remove_padding(joints, lengths) + + def gen_from_latent(self, batch): + z = batch["latent"] + lengths = batch["length"] + + feats_rst = self.vae.decode(z, lengths) + + # feats => joints + joints = self.feats2joints(feats_rst.detach().cpu()) + return remove_padding(joints, lengths) + + def recon_from_motion(self, batch): + feats_ref = batch["motion"] + length = batch["length"] + + z, dist = self.vae.encode(feats_ref, length) + feats_rst = self.vae.decode(z, length) + + # feats => joints + joints = self.feats2joints(feats_rst.detach().cpu()) + joints_ref = self.feats2joints(feats_ref.detach().cpu()) + return remove_padding(joints, + length), remove_padding(joints_ref, length) + + def _diffusion_reverse(self, encoder_hidden_states, lengths=None): + # init latents + bsz = encoder_hidden_states.shape[0] + if self.do_classifier_free_guidance: + bsz = bsz // 2 + if self.vae_type == "no": + assert lengths is not None, "no vae (diffusion only) need lengths for diffusion" + latents = torch.randn( + (bsz, max(lengths), self.cfg.DATASET.NFEATS), + device=encoder_hidden_states.device, + dtype=torch.float, + ) + else: + latents = torch.randn( + (bsz, self.latent_dim[0], self.latent_dim[-1]), + device=encoder_hidden_states.device, + dtype=torch.float, + ) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + # set timesteps + self.scheduler.set_timesteps( + self.cfg.model.scheduler.num_inference_timesteps) + timesteps = self.scheduler.timesteps.to(encoder_hidden_states.device) + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, and between [0, 1] + extra_step_kwargs = {} + if "eta" in set( + inspect.signature(self.scheduler.step).parameters.keys()): + extra_step_kwargs["eta"] = self.cfg.model.scheduler.eta + + # reverse + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = (torch.cat( + [latents] * + 2) if self.do_classifier_free_guidance else latents) + lengths_reverse = (lengths * 2 if self.do_classifier_free_guidance + else lengths) + # latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + # predict the noise residual + noise_pred = self.denoiser( + sample=latent_model_input, + timestep=t, + encoder_hidden_states=encoder_hidden_states, + lengths=lengths_reverse, + )[0] + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * ( + noise_pred_text - noise_pred_uncond) + # text_embeddings_for_guidance = encoder_hidden_states.chunk( + # 2)[1] if self.do_classifier_free_guidance else encoder_hidden_states + latents = self.scheduler.step(noise_pred, t, latents, + **extra_step_kwargs).prev_sample + # if self.predict_epsilon: + # latents = self.scheduler.step(noise_pred, t, latents, + # **extra_step_kwargs).prev_sample + # else: + # # predict x for standard diffusion model + # # compute the previous noisy sample x_t -> x_t-1 + # latents = self.scheduler.step(noise_pred, + # t, + # latents, + # 
**extra_step_kwargs).prev_sample + + # [batch_size, 1, latent_dim] -> [1, batch_size, latent_dim] + latents = latents.permute(1, 0, 2) + return latents + + def _diffusion_reverse_tsne(self, encoder_hidden_states, lengths=None): + # init latents + bsz = encoder_hidden_states.shape[0] + if self.do_classifier_free_guidance: + bsz = bsz // 2 + if self.vae_type == "no": + assert lengths is not None, "no vae (diffusion only) need lengths for diffusion" + latents = torch.randn( + (bsz, max(lengths), self.cfg.DATASET.NFEATS), + device=encoder_hidden_states.device, + dtype=torch.float, + ) + else: + latents = torch.randn( + (bsz, self.latent_dim[0], self.latent_dim[-1]), + device=encoder_hidden_states.device, + dtype=torch.float, + ) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + # set timesteps + self.scheduler.set_timesteps( + self.cfg.model.scheduler.num_inference_timesteps) + timesteps = self.scheduler.timesteps.to(encoder_hidden_states.device) + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, and between [0, 1] + extra_step_kwargs = {} + if "eta" in set( + inspect.signature(self.scheduler.step).parameters.keys()): + extra_step_kwargs["eta"] = self.cfg.model.scheduler.eta + + # reverse + latents_t = [] + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = (torch.cat( + [latents] * + 2) if self.do_classifier_free_guidance else latents) + lengths_reverse = (lengths * 2 if self.do_classifier_free_guidance + else lengths) + # latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + # predict the noise residual + noise_pred = self.denoiser( + sample=latent_model_input, + timestep=t, + encoder_hidden_states=encoder_hidden_states, + lengths=lengths_reverse, + )[0] + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * ( + noise_pred_text - noise_pred_uncond) + # text_embeddings_for_guidance = encoder_hidden_states.chunk( + # 2)[1] if self.do_classifier_free_guidance else encoder_hidden_states + latents = self.scheduler.step(noise_pred, t, latents, + **extra_step_kwargs).prev_sample + # [batch_size, 1, latent_dim] -> [1, batch_size, latent_dim] + latents_t.append(latents.permute(1,0,2)) + # [1, batch_size, latent_dim] -> [t, batch_size, latent_dim] + latents_t = torch.cat(latents_t) + return latents_t + + def _diffusion_process(self, latents, encoder_hidden_states, lengths=None): + """ + heavily from https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py + """ + # our latent [batch_size, n_token=1 or 5 or 10, latent_dim=256] + # sd latent [batch_size, [n_token0=64,n_token1=64], latent_dim=4] + # [n_token, batch_size, latent_dim] -> [batch_size, n_token, latent_dim] + latents = latents.permute(1, 0, 2) + + # Sample noise that we'll add to the latents + # [batch_size, n_token, latent_dim] + noise = torch.randn_like(latents) + bsz = latents.shape[0] + # Sample a random timestep for each motion + timesteps = torch.randint( + 0, + self.noise_scheduler.config.num_train_timesteps, + (bsz, ), + device=latents.device, + ) + timesteps = timesteps.long() + # Add noise to the latents according to the noise magnitude at each timestep + noisy_latents = 
self.noise_scheduler.add_noise(latents.clone(), noise, + timesteps) + # Predict the noise residual + noise_pred = self.denoiser( + sample=noisy_latents, + timestep=timesteps, + encoder_hidden_states=encoder_hidden_states, + lengths=lengths, + return_dict=False, + )[0] + # Chunk the noise and noise_pred into two parts and compute the loss on each part separately. + if self.cfg.LOSS.LAMBDA_PRIOR != 0.0: + noise_pred, noise_pred_prior = torch.chunk(noise_pred, 2, dim=0) + noise, noise_prior = torch.chunk(noise, 2, dim=0) + else: + noise_pred_prior = 0 + noise_prior = 0 + n_set = { + "noise": noise, + "noise_prior": noise_prior, + "noise_pred": noise_pred, + "noise_pred_prior": noise_pred_prior, + } + if not self.predict_epsilon: + n_set["pred"] = noise_pred + n_set["latent"] = latents + return n_set + + + + + + def train_vae_forward(self, batch): + feats_ref = batch["motion"] + lengths = batch["length"] + + if self.vae_type in ["hvq_body_hand_face"]: + face_ref = batch["face_motion"] + + + joint_mask = batch["joint_mask"] + + if self.vae_type in ["mld", "vposert", "actor"]: + motion_z, dist_m = self.vae.encode(feats_ref, lengths) + feats_rst = self.vae.decode(motion_z, lengths) + elif self.vae_type in ["humanvq", "spatial_MLP_vqvae", "spatial_transformer_vqvae"]: + feats_rst, (commit_x, commit_x_d), perplexity = self.vae.forward(feats_ref) + elif self.vae_type in ["hvq"]: + feats_rst, (commit_x_t, commit_x_d_t), (commit_x_b, commit_x_d_b) = self.vae.forward(feats_ref) + elif self.vae_type in ["hvq_body_hand"]: + feats_rst, (commit_x_t, commit_x_d_t), (commit_x_b, commit_x_d_b) = self.vae.forward(feats_ref) + elif self.vae_type in ["hvq_body_hand_face"]: + feats_rst, (commit_x_f, commit_x_d_f), (commit_x_t, commit_x_d_t), (commit_x_b, commit_x_d_b) = self.vae.forward(torch.cat((feats_ref, face_ref), dim=2)) + face_rst = feats_rst[:, :, -53:] + feats_rst = feats_rst[:, :, :-53] + elif self.vae_type in ["rvq"]: + feats_rst, (commit_x, commit_x_d1, commit_x_d2), perplexity = self.vae.forward(feats_ref) + + elif self.vae_type in ["mld_dual_vae"]: + body_motion_z, hand_motion_z, body_dist_m, hand_dist_m = self.vae.encode(feats_ref, lengths) + feats_rst = self.vae.decode(body_motion_z, hand_motion_z, lengths) + elif self.vae_type in ["dual_human_vq"]: + feats_rst, (body_commit_x, body_commit_x_d), (hand_commit_x, hand_commit_x_d), body_perplexity, hand_perplexity = self.vae.forward(feats_ref) + else: + raise TypeError("vae_type must be mcross or actor") + + # prepare for metric + if self.vae_type in ["mld", "vposert", "actor"]: + recons_z, dist_rm = self.vae.encode(feats_rst, lengths) + elif self.vae_type in ["mld_dual_vae"]: + body_recons_z, hand_recons_z, body_dist_rm, hand_dist_rm = self.vae.encode(feats_ref, lengths) + + # joints recover + if self.condition in ["text", "text_all", 'text_hand', 'text_body', 'text_face', "text_seperate", "only_pose_concat", "only_pose_fusion"]: + + if self.input_format in ['vector_263', 'vector_263_ori_humanml', 'root_position', 'root_position_vel', 'root_position_rot6d', 'all', 'root_body_pos_vel_hand_all', 'root_body_pos_vel_hand_pos_vel', 'root_body_pos_vel_hand_pos', 'root_position_vel_only_body', 'root_body_pos_vel_hand_pos_vel_hand_wrist']: + joints_rst = self.feats2joints(feats_rst, self.input_format) # feats_rst.shape (bs, seq, 67) joints_rst.shape (bs, seq, 22, 3) + joints_ref = self.feats2joints(feats_ref, self.input_format) + elif self.input_format in ['root_rot6d']: + joints_rst = self.feats2joints(feats_rst, skel=self.skel, 
motion_type=self.input_format) + joints_rst = joints_rst.view(feats_rst.shape[0], feats_rst.shape[1], self.njoints, 3) + joints_ref = self.feats2joints(feats_ref, skel=self.skel, motion_type=self.input_format) + joints_ref = joints_ref.view(feats_ref.shape[0], feats_ref.shape[1], self.njoints, 3) + elif self.input_format in ['smplx_212', 'smplx_159'] and self.cfg.TRAIN.use_joints: + joints_rst = self.feats2joints(feats_rst, self.input_format, self.smplx_model) + joints_ref = self.feats2joints(feats_ref, self.input_format, self.smplx_model) + elif self.input_format == 'root_body_pos_vel_hand_rot': + + joints_rst = self.feats2joints(feats_rst, skel=self.skel, motion_type=self.input_format) + joints_rst = joints_rst.view(feats_rst.shape[0], feats_rst.shape[1], self.njoints, 3) + joints_ref = self.feats2joints(feats_ref, skel=self.skel, motion_type=self.input_format) + joints_ref = joints_ref.view(feats_ref.shape[0], feats_ref.shape[1], self.njoints, 3) + elif self.input_format in ['smplx_212', 'smplx_159'] and (not self.cfg.TRAIN.use_joints): + pass + + else: + raise NotImplementedError + + elif self.condition == "action": + mask = batch["mask"] + joints_rst = self.feats2joints(feats_rst, mask) + joints_ref = self.feats2joints(feats_ref, mask) + + if self.vae_type in ["mld", "vposert", "actor"]: + if dist_m is not None: + if self.is_vae: + # Create a centred normal distribution to compare with + mu_ref = torch.zeros_like(dist_m.loc) + scale_ref = torch.ones_like(dist_m.scale) + dist_ref = torch.distributions.Normal(mu_ref, scale_ref) + else: + dist_ref = dist_m + + elif self.vae_type in ["mld_dual_vae"]: + if body_dist_m is not None: + if self.is_vae: + # Create a centred normal distribution to compare with + body_mu_ref = torch.zeros_like(body_dist_m.loc) + body_scale_ref = torch.ones_like(body_dist_m.scale) + body_dist_ref = torch.distributions.Normal(body_mu_ref, body_scale_ref) + else: + body_dist_ref = body_dist_m + + if hand_dist_m is not None: + if self.is_vae: + # Create a centred normal distribution to compare with + hand_mu_ref = torch.zeros_like(hand_dist_m.loc) + hand_scale_ref = torch.ones_like(hand_dist_m.scale) + hand_dist_ref = torch.distributions.Normal(hand_mu_ref, hand_scale_ref) + else: + hand_dist_ref = hand_dist_m + + + # cut longer part over max length + min_len = min(feats_ref.shape[1], feats_rst.shape[1]) + + if self.vae_type in ["humanvq", "spatial_MLP_vqvae", "spatial_transformer_vqvae"]: + + rs_set = { + "m_ref": feats_ref[:, :min_len, :], + "m_rst": feats_rst[:, :min_len, :], + "commit_x": commit_x, + "commit_x_d": commit_x_d + } + + return rs_set + + elif self.vae_type in ["rvq"]: + rs_set = { + "m_ref": feats_ref[:, :min_len, :], + "m_rst": feats_rst[:, :min_len, :], + "commit_x": commit_x, + "commit_x_d1": commit_x_d1, + "commit_x_d2": commit_x_d2 + } + + return rs_set + + + elif self.vae_type in ["dual_human_vq"]: + rs_set = { + "m_ref": feats_ref[:, :min_len, :], + "m_rst": feats_rst[:, :min_len, :], + "body_commit_x": body_commit_x, + "hand_commit_x": hand_commit_x, + "body_commit_x_d": body_commit_x_d, + "hand_commit_x_d": hand_commit_x_d, + } + + + return rs_set + + + elif self.vae_type in ["hvq", "hvq_body_hand"]: + rs_set = { + "m_ref": feats_ref[:, :min_len, :], + "m_rst": feats_rst[:, :min_len, :], + "commit_x_t": commit_x_t, + "commit_x_d_t": commit_x_d_t, + "commit_x_b": commit_x_b , + "commit_x_d_b": commit_x_d_b, + # "" + } + + elif self.vae_type in ['hvq_body_hand_face']: + rs_set = { + "m_ref": feats_ref[:, :min_len, :], + "m_rst": feats_rst[:, 
:min_len, :], + "fm_ref": face_ref[:, :min_len, :], + "fm_rst": face_rst[:, :min_len, :], + "commit_x_t": commit_x_t, + "commit_x_d_t": commit_x_d_t, + "commit_x_b": commit_x_b , + "commit_x_d_b": commit_x_d_b, + "commit_x_f": commit_x_f, + "commit_x_d_f": commit_x_d_f + # "" + } + + # return rs_set + + + if self.vae_type in ['mld_dual_vae']: + + if self.cfg.TRAIN.use_joints: + rs_set = { + "m_ref": feats_ref[:, :min_len, :], + "m_rst": feats_rst[:, :min_len, :], + # [bs, ntoken, nfeats]<= [ntoken, bs, nfeats] + "body_lat_m": body_motion_z.permute(1, 0, 2), + "hand_lat_m": hand_motion_z.permute(1, 0, 2), + "body_lat_rm": body_recons_z.permute(1, 0, 2), + "hand_lat_rm": hand_recons_z.permute(1, 0, 2), + "joints_ref": joints_ref, + "joints_rst": joints_rst, + "body_dist_m": body_dist_m, + "hand_dist_m": hand_dist_m, + "body_dist_ref": body_dist_ref, + "hand_dist_ref": hand_dist_ref, + } + else: + + rs_set = { + "m_ref": feats_ref[:, :min_len, :], + "m_rst": feats_rst[:, :min_len, :], + # [bs, ntoken, nfeats]<= [ntoken, bs, nfeats] + "body_lat_m": body_motion_z.permute(1, 0, 2), + "hand_lat_m": hand_motion_z.permute(1, 0, 2), + "body_lat_rm": body_recons_z.permute(1, 0, 2), + "hand_lat_rm": hand_recons_z.permute(1, 0, 2), + "body_dist_m": body_dist_m, + "hand_dist_m": hand_dist_m, + "body_dist_ref": body_dist_ref, + "hand_dist_ref": hand_dist_ref, + } + + # return rs_set + + elif self.vae_type in ["mld"]: + if self.cfg.TRAIN.use_joints: + rs_set = { + "m_ref": feats_ref[:, :min_len, :], + "m_rst": feats_rst[:, :min_len, :], + # [bs, ntoken, nfeats]<= [ntoken, bs, nfeats] + "lat_m": motion_z.permute(1, 0, 2), + "lat_rm": recons_z.permute(1, 0, 2), + "joints_ref": joints_ref, + "joints_rst": joints_rst, + "dist_m": dist_m, + "dist_ref": dist_ref, + } + else: + rs_set = { + "m_ref": feats_ref[:, :min_len, :], + "m_rst": feats_rst[:, :min_len, :], + # [bs, ntoken, nfeats]<= [ntoken, bs, nfeats] + "lat_m": motion_z.permute(1, 0, 2), + "lat_rm": recons_z.permute(1, 0, 2), + "dist_m": dist_m, + "dist_ref": dist_ref, + } + + else: + if self.cfg.TRAIN.use_joints: + # rs_set = { + # "m_ref": feats_ref[:, :min_len, :], + # "m_rst": feats_rst[:, :min_len, :], + # # [bs, ntoken, nfeats]<= [ntoken, bs, nfeats] + # "lat_m": motion_z.permute(1, 0, 2), + # "lat_rm": recons_z.permute(1, 0, 2), + # "joints_ref": joints_ref, + # "joints_rst": joints_rst, + # "dist_m": dist_m, + # "dist_ref": dist_ref, + # } + rs_set["joints_ref"] = joints_ref + rs_set["joints_rst"] = joints_rst + else: + rs_set = { + "m_ref": feats_ref[:, :min_len, :], + "m_rst": feats_rst[:, :min_len, :], + # [bs, ntoken, nfeats]<= [ntoken, bs, nfeats] + "lat_m": motion_z.permute(1, 0, 2), + "lat_rm": recons_z.permute(1, 0, 2), + "dist_m": dist_m, + "dist_ref": dist_ref, + } + + + if self.cfg.LOSS.hand_mask: + rs_set['joint_mask'] = batch['joint_mask'][:, :min_len, :] + + + if self.cfg.LOSS.Velocity_loss: + vel_ref = feats_ref[:, :min_len, :][:, 1:, 3:] - feats_ref[:, :min_len, :][:, :-1, 3:] + vel_rst = feats_rst[:, :min_len, :][:, 1:, 3:] - feats_rst[:, :min_len, :][:, :-1, 3:] + rs_set['vel_rst'] = vel_rst + rs_set['vel_ref'] = vel_ref + + + return rs_set + + def train_diffusion_forward(self, batch): + feats_ref = batch["motion"] + lengths = batch["length"] + # motion encode + with torch.no_grad(): + if self.vae_type in ["mld", "vposert", "actor"]: + z, dist = self.vae.encode(feats_ref, lengths) + elif self.vae_type == "no": + z = feats_ref.permute(1, 0, 2) + else: + raise TypeError("vae_type must be mcross or actor") + + if self.condition 
in ["text", "text_uncond"]: + text = batch["text"] + # classifier free guidance: randomly drop text during training + text = [ + "" if np.random.rand(1) < self.guidance_uncodp else i + for i in text + ] + # text encode + cond_emb = self.text_encoder(text) + elif self.condition in ["text_all"]: + text = [] + + for i in range(len(batch["text"])): + text.append(batch["text"][i] +' ' + batch['face_text'][i] + ' ' + batch["body_text"][i] + ' ' + batch["hand_text"][i]) + # text = batch["text"] +' ' + batch["body_text"] + ' ' + batch["hand_text"] + # classifier free guidance: randomly drop text during training + text = [ + "" if np.random.rand(1) < self.guidance_uncodp else i + for i in text + ] + # text encode + cond_emb = self.text_encoder(text) + elif self.condition in ["text_face"]: + text = [] + + for i in range(len(batch["text"])): + text.append(batch["text"][i] +' ' + batch['face_text'][i]) + + text = [ + "" if np.random.rand(1) < self.guidance_uncodp else i + for i in text + ] + # text encode + cond_emb = self.text_encoder(text) + elif self.condition in ["text_body"]: + text = [] + + for i in range(len(batch["text"])): + text.append(batch["text"][i] +' ' + batch['body_text'][i]) + + # text = batch["text"] +' ' + batch["body_text"] + ' ' + batch["hand_text"] + # classifier free guidance: randomly drop text during training + text = [ + "" if np.random.rand(1) < self.guidance_uncodp else i + for i in text + ] + # text encode + cond_emb = self.text_encoder(text) + elif self.condition in ["text_hand"]: + text = [] + + for i in range(len(batch["text"])): + text.append(batch["text"][i] +' ' + batch['hand_text'][i]) + + # text = batch["text"] +' ' + batch["body_text"] + ' ' + batch["hand_text"] + # classifier free guidance: randomly drop text during training + text = [ + "" if np.random.rand(1) < self.guidance_uncodp else i + for i in text + ] + # text encode + cond_emb = self.text_encoder(text) + + elif self.condition in ['text_face_body']: + text = [] + + for i in range(len(batch["text"])): + text.append(batch["text"][i] +' ' + batch['face_text'][i] + ' ' + batch["body_text"][i]) + + # text = batch["text"] +' ' + batch["body_text"] + ' ' + batch["hand_text"] + # classifier free guidance: randomly drop text during training + text = [ + "" if np.random.rand(1) < self.guidance_uncodp else i + for i in text + ] + # text encode + cond_emb = self.text_encoder(text) + + elif self.condition in ["text_seperate"]: + + text = [] + for i in range(len(batch["text"])): + text.append((batch["text"][i], batch["face_text"][i], batch["body_text"][i], batch["hand_text"][i])) + + # text = batch["text"] +' ' + batch["body_text"] + ' ' + batch["hand_text"] + # classifier free guidance: randomly drop text during training + text = [ + ("", "", "", "") if np.random.rand(1) < self.guidance_uncodp else i + for i in text + ] + # text encode + + semantic_text = [] + face_text = [] + body_text = [] + hand_text = [] + for i in range(len(text)): + semantic_text.append(text[i][0]) + face_text.append(text[i][1]) + body_text.append(text[i][2]) + hand_text.append(text[i][3]) + + cond_emb_semantic = self.text_encoder(semantic_text) + cond_emb_face = self.text_encoder(face_text) + cond_emb_body = self.text_encoder(body_text) + cond_emb_hand = self.text_encoder(hand_text) + + cond_emb = self.linear_fusion(cond_emb_semantic, cond_emb_face, cond_emb_body, cond_emb_hand) + + elif self.condition in ["only_pose_concat"]: + text = [] + for i in range(len(batch["text"])): + text.append(batch["face_text"][i] +' ' + batch["body_text"][i] + ' 
' + batch["hand_text"][i]) + + # text = batch["text"] +' ' + batch["body_text"] + ' ' + batch["hand_text"] + # classifier free guidance: randomly drop text during training + text = [ + "" if np.random.rand(1) < self.guidance_uncodp else i + for i in text + ] + # text encode + cond_emb = self.text_encoder(text) + + elif self.condition in ["only_pose_fusion"]: + + text = [] + for i in range(len(batch["text"])): + text.append((batch["face_text"][i], batch["body_text"][i], batch["hand_text"][i])) + + # text = batch["text"] +' ' + batch["body_text"] + ' ' + batch["hand_text"] + # classifier free guidance: randomly drop text during training + text = [ + ("", "", "") if np.random.rand(1) < self.guidance_uncodp else i + for i in text + ] + # text encode + + face_text = [] + body_text = [] + hand_text = [] + for i in range(len(text)): + face_text.append(text[i][0]) + body_text.append(text[i][1]) + hand_text.append(text[i][2]) + + cond_emb_face = self.text_encoder(face_text) + cond_emb_body = self.text_encoder(body_text) + cond_emb_hand = self.text_encoder(hand_text) + + + cond_emb = self.linear_fusion(None,cond_emb_face, cond_emb_body, cond_emb_hand) + # emb_cat = torch.cat((cond_emb_face, cond_emb_body), axis=1) + # emb_cat = emb_cat.view(emb_cat.size(0), -1) + # cond_emb = self.emb_fuse(emb_cat).unsqueeze(1) + + + elif self.condition in ['action']: + action = batch['action'] + # text encode + cond_emb = action + else: + raise TypeError(f"condition type {self.condition} not supported") + + # diffusion process return with noise and noise_pred + n_set = self._diffusion_process(z, cond_emb, lengths) + return {**n_set} + + def test_diffusion_forward(self, batch, finetune_decoder=False): + lengths = batch["length"] + + if self.condition in ["text", "text_uncond"]: + # get text embeddings + if self.do_classifier_free_guidance: + uncond_tokens = [""] * len(lengths) + if self.condition == 'text': + texts = batch["text"] + uncond_tokens.extend(texts) + elif self.condition == 'text_uncond': + uncond_tokens.extend(uncond_tokens) + texts = uncond_tokens + cond_emb = self.text_encoder(texts) + elif self.condition in ['action']: + cond_emb = batch['action'] + if self.do_classifier_free_guidance: + cond_emb = torch.cat( + cond_emb, + torch.zeros_like(batch['action'], + dtype=batch['action'].dtype)) + else: + raise TypeError(f"condition type {self.condition} not supported") + + # diffusion reverse + with torch.no_grad(): + z = self._diffusion_reverse(cond_emb, lengths) + + with torch.no_grad(): + if self.vae_type in ["mld", "vposert", "actor"]: + feats_rst = self.vae.decode(z, lengths) + elif self.vae_type == "no": + feats_rst = z.permute(1, 0, 2) + else: + raise TypeError("vae_type must be mcross or actor or mld") + + joints_rst = self.feats2joints(feats_rst) + + rs_set = { + "m_rst": feats_rst, + # [bs, ntoken, nfeats]<= [ntoken, bs, nfeats] + "lat_t": z.permute(1, 0, 2), + "joints_rst": joints_rst, + } + + # prepare gt/refer for metric + if "motion" in batch.keys() and not finetune_decoder: + feats_ref = batch["motion"].detach() + with torch.no_grad(): + if self.vae_type in ["mld", "vposert", "actor"]: + motion_z, dist_m = self.vae.encode(feats_ref, lengths) + recons_z, dist_rm = self.vae.encode(feats_rst, lengths) + elif self.vae_type == "no": + motion_z = feats_ref + recons_z = feats_rst + + joints_ref = self.feats2joints(feats_ref) + + rs_set["m_ref"] = feats_ref + rs_set["lat_m"] = motion_z.permute(1, 0, 2) + rs_set["lat_rm"] = recons_z.permute(1, 0, 2) + rs_set["joints_ref"] = joints_ref + return rs_set + 
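# --- illustrative sketch (not part of the original file) -------------------
# Each diffusion-stage eval path above and below performs the same
# classifier-free guidance step: empty prompts are prepended so the batch is
# laid out as [uncond; cond], the denoiser runs once on the doubled batch,
# and chunk(2) therefore yields the unconditional prediction first. A minimal,
# self-contained sketch, with denoise_fn and guidance_scale standing in for
# self.denoiser and self.guidance_scale:
import torch

def cfg_noise_pred(denoise_fn, latents, t, text_emb, guidance_scale):
    latent_in = torch.cat([latents] * 2)  # duplicate batch for [uncond; cond]
    noise_uncond, noise_text = denoise_fn(latent_in, t, text_emb).chunk(2)
    # push the prediction away from the unconditional branch
    return noise_uncond + guidance_scale * (noise_text - noise_uncond)
# ---------------------------------------------------------------------------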
+ def t2m_eval(self, batch): + texts = batch["text"] + motions = batch["motion"].detach().clone() + lengths = batch["length"] + word_embs = batch["word_embs"].detach().clone() + pos_ohot = batch["pos_ohot"].detach().clone() + text_lengths = batch["text_len"].detach().clone() + + if self.vae_type in ["hvq_body_hand_face"]: + face_ref = batch["face_motion"] + # start + start = time.time() + + if self.trainer.datamodule.is_mm: + texts = texts * self.cfg.TEST.MM_NUM_REPEATS + motions = motions.repeat_interleave(self.cfg.TEST.MM_NUM_REPEATS, + dim=0) + lengths = lengths * self.cfg.TEST.MM_NUM_REPEATS + word_embs = word_embs.repeat_interleave( + self.cfg.TEST.MM_NUM_REPEATS, dim=0) + pos_ohot = pos_ohot.repeat_interleave(self.cfg.TEST.MM_NUM_REPEATS, + dim=0) + text_lengths = text_lengths.repeat_interleave( + self.cfg.TEST.MM_NUM_REPEATS, dim=0) + + if self.stage in ['diffusion', 'vae_diffusion']: + # diffusion reverse + if self.do_classifier_free_guidance: + uncond_tokens = [""] * len(texts) + if self.condition == 'text': + uncond_tokens.extend(texts) + elif self.condition == 'text_uncond': + uncond_tokens.extend(uncond_tokens) + texts = uncond_tokens + text_emb = self.text_encoder(texts) + z = self._diffusion_reverse(text_emb, lengths) + elif self.stage in ['vae']: + if self.vae_type in ["mld", "vposert", "actor"]: + z, dist_m = self.vae.encode(motions, lengths) + elif self.vae_type in ["humanvq", "spatial_MLP_vqvae", "spatial_transformer_vqvae"]: + quants = self.vae.encode(motions) + elif self.vae_type in ["hvq", "hvq_body_hand"]: + _, _, _, _, id_t, id_b = self.vae.encode(motions) + elif self.vae_type in ["rvq"]: + quants_1, quants_2 = self.vae.encode(motions) + elif self.vae_type in ["dual_human_vq"]: + body_quants, hand_quants = self.vae.encode(motions) + elif self.vae_type == "hvq_body_hand_face": + _, _, _, _, _, _, id_f, id_t, id_b = self.vae.encode(motions) + else: + raise TypeError("Not supported vae type!") + if self.condition in ['text_uncond']: + # uncond random sample + z = torch.randn_like(z) + + with torch.no_grad(): + if self.vae_type in ["mld", "vposert", "actor"]: + feats_rst = self.vae.decode(z, lengths) + elif self.vae_type in ["humanvq", "spatial_MLP_vqvae", "spatial_transformer_vqvae"]: + feats_rst = self.vae.forward_decoder(quants) + feats_rst = feats_rst.reshape(motions.shape[0], motions.shape[1], -1) + elif self.vae_type in ["hvq", "hvq_body_hand"]: + feats_rst = self.vae.forward_decoder(id_t, id_b) + elif self.vae_type in ["hvq_body_hand_face"]: + feats_rst = self.vae.forward_decoder(id_f, id_t, id_b) + face_rst = feats_rst[:, :, -53:] + feats_rst = feats_rst[:, :, :-53] + + elif self.vae_type in ["rvq"]: + feats_rst = self.vae.forward_decoder(quants_1, quants_2) + feats_rst = feats_rst.reshape(motions.shape[0], motions.shape[1], -1) + elif self.vae_type in ["dual_human_vq"]: + feats_rst = self.vae.forward_decoder(body_quants, hand_quants) + feats_rst = feats_rst.reshape(motions.shape[0], motions.shape[1], -1) + elif self.vae_type == "no": + feats_rst = z.permute(1, 0, 2) + else: + raise TypeError("Not supported vae type!") + + # end time + end = time.time() + self.times.append(end - start) + # joints recover + joints_rst = self.feats2joints(feats_rst, self.input_format) + joints_ref = self.feats2joints(motions, self.input_format) + + + + # renorm for t2m evaluators + feats_rst = self.datamodule.renorm4t2m(feats_rst) + motions = self.datamodule.renorm4t2m(motions) + + # t2m motion encoder + m_lens = lengths.copy() + m_lens = torch.tensor(m_lens, device=motions.device) 
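# The BiGRU-based t2m evaluator consumes length-sorted batches, so the lines
# below sort samples by descending motion length (align_idx) and reorder
# motions, feats_rst and m_lens together before dividing by UNIT_LEN, the
# temporal downsampling factor of the movement encoder. Worked example, with
# UNIT_LEN = 4 for illustration: lengths [40, 196, 80] give align_idx
# [1, 2, 0], and the sorted lengths [196, 80, 40] become m_lens [49, 20, 10].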
+ align_idx = np.argsort(m_lens.data.tolist())[::-1].copy() + motions = motions[align_idx] + feats_rst = feats_rst[align_idx] + m_lens = m_lens[align_idx] + m_lens = torch.div(m_lens, + self.cfg.DATASET.HUMANML3D.UNIT_LEN, + rounding_mode="floor") + + recons_mov = self.t2m_moveencoder(feats_rst[..., :-4]).detach() + recons_emb = self.t2m_motionencoder(recons_mov, m_lens) + motion_mov = self.t2m_moveencoder(motions[..., :-4]).detach() + motion_emb = self.t2m_motionencoder(motion_mov, m_lens) + + # t2m text encoder + if self.cfg.model.eval_text_source == 'token': + text_emb = self.t2m_textencoder(word_embs, pos_ohot,text_lengths)[align_idx] + elif self.cfg.model.eval_text_source == 'only_text_token': + text_emb = self.t2m_textencoder(word_embs, text_lengths)[align_idx] + elif self.cfg.model.eval_text_source in ['caption']: + if self.cfg.model.eval_text_encode_way == 'clip': + raise NotImplementedError + + elif self.cfg.model.eval_text_encode_way == 't5': + raise NotImplementedError + + elif 'GRU' in self.cfg.model.eval_text_encode_way: + text_emb = self.t2m_textencoder(word_embs, text_lengths)[align_idx] + else: + raise NotImplementedError + + + rs_set = { + "m_ref": motions, + "m_rst": feats_rst, + "lat_t": text_emb, + "lat_m": motion_emb, + "lat_rm": recons_emb, + "joints_ref": joints_ref, + "joints_rst": joints_rst, + } + return rs_set + + + def tmr_t2m_eval(self, batch): + + texts = batch["text"] + texts_ori = copy.deepcopy(batch["text"]) + motions = batch["motion"].detach().clone() + lengths = batch["length"] + word_embs = batch["word_embs"].detach().clone() + pos_ohot = batch["pos_ohot"].detach().clone() + text_lengths = batch["text_len"].detach().clone() + + name = batch["retrieval_name"] + + # start + start = time.time() + + if self.trainer.datamodule.is_mm: + texts = texts * self.cfg.TEST.MM_NUM_REPEATS + texts_ori = texts_ori * self.cfg.TEST.MM_NUM_REPEATS + + motions = motions.repeat_interleave(self.cfg.TEST.MM_NUM_REPEATS, + dim=0) + lengths = lengths * self.cfg.TEST.MM_NUM_REPEATS + word_embs = word_embs.repeat_interleave( + self.cfg.TEST.MM_NUM_REPEATS, dim=0) + pos_ohot = pos_ohot.repeat_interleave(self.cfg.TEST.MM_NUM_REPEATS, + dim=0) + text_lengths = text_lengths.repeat_interleave( + self.cfg.TEST.MM_NUM_REPEATS, dim=0) + + + + if self.stage in ['diffusion', 'vae_diffusion']: + # diffusion reverse + if self.do_classifier_free_guidance: + uncond_tokens = [""] * len(texts) + if self.condition == 'text': + uncond_tokens.extend(texts) + elif self.condition == 'text_uncond': + uncond_tokens.extend(uncond_tokens) + texts = uncond_tokens + text_emb = self.text_encoder(texts) + z = self._diffusion_reverse(text_emb, lengths) # 1, 30 , 256 + elif self.stage in ['vae']: + if self.vae_type in ["mld", "vposert", "actor"]: + z, dist_m = self.vae.encode(motions, lengths) + elif self.vae_type in ["humanvq", "spatial_MLP_vqvae", "spatial_transformer_vqvae"]: + quants = self.vae.encode(motions) + elif self.vae_type in ["hvq", "hvq_body_hand"]: + _, _, _, _, id_t, id_b = self.vae.encode(motions) + elif self.vae_type in ["dual_human_vq"]: + body_quants, hand_quants = self.vae.encode(motions) + elif self.vae_type in ["rvq"]: + quants_1, quants_2 = self.vae.encode(motions) + else: + raise TypeError("Not supported vae type!") + if self.condition in ['text_uncond']: + # uncond random sample + z = torch.randn_like(z) + + with torch.no_grad(): + if self.vae_type in ["mld", "vposert", "actor"]: + feats_rst = self.vae.decode(z, lengths) # 30, 180, 313 + elif self.vae_type in ["humanvq", 
"spatial_MLP_vqvae", "spatial_transformer_vqvae"]: + feats_rst = self.vae.forward_decoder(quants) + feats_rst = feats_rst.reshape(motions.shape[0], motions.shape[1], -1) + elif self.vae_type in ["hvq", "hvq_body_hand"]: + feats_rst = self.vae.forward_decoder(id_t, id_b) + elif self.vae_type in ["rvq"]: + feats_rst = self.vae.forward_decoder(quants_1, quants_2) + feats_rst = feats_rst.reshape(motions.shape[0], motions.shape[1], -1) + elif self.vae_type in ["dual_human_vq"]: + feats_rst = self.vae.forward_decoder(body_quants, hand_quants) + feats_rst = feats_rst.reshape(motions.shape[0], motions.shape[1], -1) + elif self.vae_type == "no": + feats_rst = z.permute(1, 0, 2) + else: + raise TypeError("Not supported vae type!") + + # end time + end = time.time() + self.times.append(end - start) + # joints recover + joints_rst = self.feats2joints(feats_rst, self.input_format) + joints_ref = self.feats2joints(motions, self.input_format) + + + + # renorm for t2m evaluators + feats_rst_before_renorm4t2m = feats_rst.clone() + motions_before_renorm4t2m = motions.clone() + + feats_rst = self.datamodule.renorm4t2m(feats_rst) + motions = self.datamodule.renorm4t2m(motions) + + # t2m motion encoder + m_lens = lengths.copy() + m_lens = torch.tensor(m_lens, device=motions.device) + align_idx = np.argsort(m_lens.data.tolist())[::-1].copy() + motions = motions[align_idx] + feats_rst = feats_rst[align_idx] + m_lens = m_lens[align_idx] + m_lens_ori = m_lens.clone() + feats_rst_before_renorm4t2m = feats_rst_before_renorm4t2m[align_idx] + motions_before_renorm4t2m = motions_before_renorm4t2m[align_idx] + m_lens = torch.div(m_lens, + self.cfg.DATASET.HUMANML3D.UNIT_LEN, + rounding_mode="floor") + + recons_mov = self.t2m_moveencoder(feats_rst[..., :-4]).detach() + recons_emb = self.t2m_motionencoder(recons_mov, m_lens) + motion_mov = self.t2m_moveencoder(motions[..., :-4]).detach() + motion_emb = self.t2m_motionencoder(motion_mov, m_lens) + + recons_emb_tmr = self.t2m_TMR_motionencoder_eval(feats_rst_before_renorm4t2m, m_lens_ori).loc + motion_emb_tmr = self.t2m_TMR_motionencoder_eval(motions_before_renorm4t2m, m_lens_ori).loc + + + + # t2m text encoder + assert self.cfg.model.eval_text_source in ['caption'] + + + if self.cfg.model.eval_text_encode_way == 'clip': + raise NotImplementedError + + elif self.cfg.model.eval_text_encode_way == 't5': + raise NotImplementedError + + elif 'GRU' in self.cfg.model.eval_text_encode_way: + text_emb = self.t2m_textencoder(word_embs, text_lengths)[align_idx] # 30 ,512 + else: + raise NotImplementedError + + + + text_emb_tmr = self.t2m_TMR_textencoder_eval(texts_ori).loc[align_idx] # 30 , 256 + + + rs_set = { + "m_ref": motions, + "m_rst": feats_rst, + "lat_t": text_emb, + "lat_t_tmr": text_emb_tmr, + "lat_m": motion_emb, + "lat_rm": recons_emb, + "lat_m_tmr": motion_emb_tmr, + "lat_rm_tmr": recons_emb_tmr, + "joints_ref": joints_ref, + "joints_rst": joints_rst, + } + + + return rs_set + + def t2m_eval_save_motion_token(self, batch): + + name = batch["name"] + motions = batch["motion"].detach().clone() + + # start + start = time.time() + + + + if self.stage in ['diffusion', 'vae_diffusion']: + # diffusion reverse + if self.do_classifier_free_guidance: + uncond_tokens = [""] * len(texts) + if self.condition == 'text': + uncond_tokens.extend(texts) + elif self.condition == 'text_uncond': + uncond_tokens.extend(uncond_tokens) + texts = uncond_tokens + text_emb = self.text_encoder(texts) + z = self._diffusion_reverse(text_emb, lengths) + elif self.stage in ['vae']: + if self.vae_type 
in ["mld", "vposert", "actor"]: + z, dist_m = self.vae.encode(motions, lengths) + elif self.vae_type in ["humanvq", "spatial_MLP_vqvae", "spatial_transformer_vqvae"]: + quants = self.vae.encode(motions) + elif self.vae_type in ["hvq", "hvq_body_hand"]: + _, _, _, _, id_t, id_b = self.vae.encode(motions) + else: + raise TypeError("Not supported vae type!") + if self.condition in ['text_uncond']: + # uncond random sample + z = torch.randn_like(z) + + with torch.no_grad(): + if self.vae_type in ["mld", "vposert", "actor"]: + feats_rst = self.vae.decode(z, lengths) + elif self.vae_type in ["humanvq", "spatial_MLP_vqvae", "spatial_transformer_vqvae"]: + feats_rst = self.vae.forward_decoder(quants) + feats_rst = feats_rst.reshape(motions.shape[0], motions.shape[1], -1) + elif self.vae_type in ["hvq", "hvq_body_hand"]: + feats_rst = self.vae.forward_decoder(id_t, id_b) + elif self.vae_type in ["rvq"]: + feats_rst = self.vae.forward_decoder(quants_1, quants_2) + feats_rst = feats_rst.reshape(motions.shape[0], motions.shape[1], -1) + elif self.vae_type == "no": + feats_rst = z.permute(1, 0, 2) + + # end time + end = time.time() + self.times.append(end - start) + + # joints recover + + joints_rst = self.feats2joints(feats_rst, skel=self.skel, motion_type=self.input_format) + + joints_rst = joints_rst.view(feats_rst.shape[0], feats_rst.shape[1], self.njoints, 3) + joints_ref = self.feats2joints(motions, skel=self.skel, motion_type=self.input_format) + joints_ref = joints_ref.view(motions.shape[0], motions.shape[1], self.njoints, 3) + + assert len(name) == 1 + + feats_rst = self.renorm2ori(feats_rst) + motions = self.renorm2ori(motions) + feats_rst_path = os.path.join(f"./visualization/visualization/{self.cfg.model.vae_type}_VAE_motionx_feats_rst_norm_back", name[0] + '.npy') + feats_ref_path = os.path.join(f"./visualization/visualization/{self.cfg.model.vae_type}_VAE_motionx_feats_ref_norm_back", name[0] + '.npy') + joitns_rst_path = os.path.join(f"./visualization/visualization/{self.cfg.model.vae_type}_VAE_motionx_joints_rst_norm_back", name[0] + '.npy') + joitns_ref_path = os.path.join(f"./visualization/visualization/{self.cfg.model.vae_type}_VAE_motionx_joints_ref_norm_back", name[0] + '.npy') + + feats_rst_parent_directory = os.path.dirname(feats_rst_path) + if not os.path.exists(feats_rst_parent_directory): + os.makedirs(feats_rst_parent_directory) + + feats_ref_parent_directory = os.path.dirname(feats_ref_path) + if not os.path.exists(feats_ref_parent_directory): + os.makedirs(feats_ref_parent_directory) + + joints_rst_parent_directory = os.path.dirname(joitns_rst_path) + if not os.path.exists(joints_rst_parent_directory): + os.makedirs(joints_rst_parent_directory) + + joints_ref_parent_directory = os.path.dirname(joitns_ref_path) + if not os.path.exists(joints_ref_parent_directory): + os.makedirs(joints_ref_parent_directory) + + + + np.save(feats_rst_path, feats_rst[0].detach().cpu().numpy()) + np.save(feats_ref_path, motions[0].detach().cpu().numpy()) + np.save(joitns_rst_path, joints_rst[0].detach().cpu().numpy()) + np.save(joitns_ref_path, joints_ref[0].detach().cpu().numpy()) + + + if self.vae_type in ["hvq", "hvq_body_hand"]: + rs_set = { + "m_ref": motions, + "m_rst": feats_rst, + "joints_ref": joints_ref, + "joints_rst": joints_rst, + "motion_code_t": id_t, + "motion_code_b": id_b, + "name": name + } + + else: + rs_set = { + "m_ref": motions, + "m_rst": feats_rst, + "joints_ref": joints_ref, + "joints_rst": joints_rst, + "motion_code": quants, + "name": name + } + return rs_set + + + 
def t2m_eval_cal_sort(self, batch): + + name = batch["name"] + motions = batch["motion"].detach().clone() + + start = time.time() + + + if self.stage in ['diffusion', 'vae_diffusion']: + # diffusion reverse + if self.do_classifier_free_guidance: + uncond_tokens = [""] * len(texts) + if self.condition == 'text': + uncond_tokens.extend(texts) + elif self.condition == 'text_uncond': + uncond_tokens.extend(uncond_tokens) + texts = uncond_tokens + text_emb = self.text_encoder(texts) + z = self._diffusion_reverse(text_emb, lengths) + elif self.stage in ['vae']: + if self.vae_type in ["mld", "vposert", "actor"]: + z, dist_m = self.vae.encode(motions, lengths) + elif self.vae_type in ["humanvq", "spatial_MLP_vqvae", "spatial_transformer_vqvae"]: + quants = self.vae.encode(motions) + elif self.vae_type in ["hvq", "hvq_body_hand"]: + _, _, _, _, id_t, id_b = self.vae.encode(motions) + else: + raise TypeError("Not supported vae type!") + if self.condition in ['text_uncond']: + # uncond random sample + z = torch.randn_like(z) + + with torch.no_grad(): + if self.vae_type in ["mld", "vposert", "actor"]: + feats_rst = self.vae.decode(z, lengths) + elif self.vae_type in ["humanvq", "spatial_MLP_vqvae", "spatial_transformer_vqvae"]: + feats_rst = self.vae.forward_decoder(quants) + feats_rst = feats_rst.reshape(motions.shape[0], motions.shape[1], -1) + elif self.vae_type in ["hvq", "hvq_body_hand"]: + feats_rst = self.vae.forward_decoder(id_t, id_b) + elif self.vae_type in ["rvq"]: + feats_rst = self.vae.forward_decoder(quants_1, quants_2) + feats_rst = feats_rst.reshape(motions.shape[0], motions.shape[1], -1) + elif self.vae_type == "no": + feats_rst = z.permute(1, 0, 2) + + # end time + end = time.time() + self.times.append(end - start) + + # joints recover + + joints_rst = self.feats2joints(feats_rst, skel=self.skel, motion_type=self.input_format) + + joints_rst = joints_rst.view(feats_rst.shape[0], feats_rst.shape[1], self.njoints, 3) + joints_ref = self.feats2joints(motions, skel=self.skel, motion_type=self.input_format) + joints_ref = joints_ref.view(motions.shape[0], motions.shape[1], self.njoints, 3) + + feats_rst = self.renorm2ori(feats_rst) + motions = self.renorm2ori(motions) + + + assert len(name) == 1 + + + if self.vae_type in ["hvq", "hvq_body_hand"]: + rs_set = { + "m_ref": motions, + "m_rst": feats_rst, + "joints_ref": joints_ref, + "joints_rst": joints_rst, + "motion_code_t": id_t, + "motion_code_b": id_b + } + + else: + rs_set = { + "m_ref": motions, + "m_rst": feats_rst, + "joints_ref": joints_ref, + "joints_rst": joints_rst, + "motion_code": quants + } + return rs_set + + def normal_eval(self, batch): + texts = batch["text"] + motions = batch["motion"].detach().clone() + lengths = batch["length"] + word_embs = batch["word_embs"].detach().clone() + pos_ohot = batch["pos_ohot"].detach().clone() + text_lengths = batch["text_len"].detach().clone() + + # start + start = time.time() + + if self.trainer.datamodule.is_mm: + texts = texts * self.cfg.TEST.MM_NUM_REPEATS + motions = motions.repeat_interleave(self.cfg.TEST.MM_NUM_REPEATS, + dim=0) + lengths = lengths * self.cfg.TEST.MM_NUM_REPEATS + word_embs = word_embs.repeat_interleave( + self.cfg.TEST.MM_NUM_REPEATS, dim=0) + pos_ohot = pos_ohot.repeat_interleave(self.cfg.TEST.MM_NUM_REPEATS, + dim=0) + text_lengths = text_lengths.repeat_interleave( + self.cfg.TEST.MM_NUM_REPEATS, dim=0) + + if self.stage in ['diffusion', 'vae_diffusion']: + # diffusion reverse + if self.do_classifier_free_guidance: + uncond_tokens = [""] * len(texts) + if 
self.condition == 'text': + uncond_tokens.extend(texts) + elif self.condition == 'text_uncond': + uncond_tokens.extend(uncond_tokens) + texts = uncond_tokens + text_emb = self.text_encoder(texts) + z = self._diffusion_reverse(text_emb, lengths) + elif self.stage in ['vae']: + if self.vae_type in ["mld", "vposert", "actor"]: + z, dist_m = self.vae.encode(motions, lengths) + elif self.vae_type in ["humanvq", "spatial_MLP_vqvae", "spatial_transformer_vqvae"]: + quants = self.vae.encode(motions) + elif self.vae_type in ["mld_dual_vae"]: + body_z, hand_z, body_dist_m, hand_dist_m = self.vae.encode(motions, lengths) + elif self.vae_type in ["dual_human_vq"]: + body_quants, hand_quants = self.vae.encode(motions) + elif self.vae_type in ["rvq"]: + quants_1, quants_2 = self.vae.encode(motions) + elif self.vae_type in ["hvq", "hvq_body_hand"]: + _, _, _, _, id_t, id_b = self.vae.encode(motions) + else: + raise TypeError("Not supported vae type!") + if self.condition in ['text_uncond']: + # uncond random sample + z = torch.randn_like(z) + + with torch.no_grad(): + if self.vae_type in ["mld", "vposert", "actor"]: + feats_rst = self.vae.decode(z, lengths) + elif self.vae_type in ["humanvq", "spatial_MLP_vqvae", "spatial_transformer_vqvae"]: + feats_rst = self.vae.forward_decoder(quants) + feats_rst = feats_rst.reshape(motions.shape[0], motions.shape[1], -1) + elif self.vae_type in ["mld_dual_vae"]: + feats_rst = self.vae.decode(body_z, hand_z, lengths) + elif self.vae_type in ["dual_human_vq"]: + feats_rst = self.vae.forward_decoder(body_quants, hand_quants) + feats_rst = feats_rst.reshape(motions.shape[0], motions.shape[1], -1) + elif self.vae_type in ["rvq"]: + feats_rst = self.vae.forward_decoder(quants_1, quants_2) + feats_rst = feats_rst.reshape(motions.shape[0], motions.shape[1], -1) + elif self.vae_type in ["hvq", "hvq_body_hand"]: + feats_rst = self.vae.forward_decoder(id_t, id_b) + + elif self.vae_type == "no": + feats_rst = z.permute(1, 0, 2) + else: + raise NotImplementedError + + # end time + end = time.time() + self.times.append(end - start) + + + joints_rst = self.feats2joints(feats_rst, skel=self.skel, motion_type=self.input_format) + joints_rst = joints_rst.view(feats_rst.shape[0], feats_rst.shape[1], self.njoints, 3) + joints_ref = self.feats2joints(motions, skel=self.skel, motion_type=self.input_format) + joints_ref = joints_ref.view(motions.shape[0], motions.shape[1], self.njoints, 3) + + + + rs_set = { + "m_ref": motions, + "m_rst": feats_rst, + "joints_ref": joints_ref, + "joints_rst": joints_rst, + } + return rs_set + + + def t2m_eval_smplx(self, batch): + + + texts = batch["text"] + motions = batch["motion"].detach().clone() + lengths = batch["length"] + word_embs = batch["word_embs"].detach().clone() + pos_ohot = batch["pos_ohot"].detach().clone() + text_lengths = batch["text_len"].detach().clone() + # start + start = time.time() + if self.trainer.datamodule.is_mm: + texts = texts * self.cfg.TEST.MM_NUM_REPEATS + motions = motions.repeat_interleave(self.cfg.TEST.MM_NUM_REPEATS, + dim=0) + lengths = lengths * self.cfg.TEST.MM_NUM_REPEATS + word_embs = word_embs.repeat_interleave( + self.cfg.TEST.MM_NUM_REPEATS, dim=0) + pos_ohot = pos_ohot.repeat_interleave(self.cfg.TEST.MM_NUM_REPEATS, + dim=0) + text_lengths = text_lengths.repeat_interleave( + self.cfg.TEST.MM_NUM_REPEATS, dim=0) + + if self.stage in ['diffusion', 'vae_diffusion']: + # diffusion reverse + if self.do_classifier_free_guidance: + uncond_tokens = [""] * len(texts) + if self.condition == 'text': +
uncond_tokens.extend(texts) + elif self.condition == 'text_uncond': + uncond_tokens.extend(uncond_tokens) + texts = uncond_tokens + text_emb = self.text_encoder(texts) + z = self._diffusion_reverse(text_emb, lengths) + elif self.stage in ['vae']: + if self.vae_type in ["mld", "vposert", "actor"]: + z, dist_m = self.vae.encode(motions, lengths) + elif self.vae_type in ["humanvq", "spatial_MLP_vqvae", "spatial_transformer_vqvae"]: + quants = self.vae.encode(motions) + else: + raise TypeError("Not supported vae type!") + if self.condition in ['text_uncond']: + # uncond random sample + z = torch.randn_like(z) + + with torch.no_grad(): + if self.vae_type in ["mld", "vposert", "actor"]: + feats_rst = self.vae.decode(z, lengths) + elif self.vae_type in ["humanvq"]: + feats_rst = self.vae.forward_decoder(quants) + feats_rst = feats_rst.reshape(motions.shape[0], motions.shape[1], -1) + elif self.vae_type == "no": + feats_rst = z.permute(1, 0, 2) + else: + raise TypeError("Not supported vae type!") + + # end time + end = time.time() + self.times.append(end - start) + # + # joints recover + if self.cfg.TRAIN.use_joints: + joints_rst = self.feats2joints(feats_rst, self.motion_type, self.smplx_model) + joints_ref = self.feats2joints(motions, self.motion_type, self.smplx_model) + + + + # renorm for t2m evaluators + feats_rst = self.datamodule.renorm4t2m(feats_rst) + motions = self.datamodule.renorm4t2m(motions) + # t2m motion encoder + m_lens = lengths.copy() + m_lens = torch.tensor(m_lens, device=motions.device) + align_idx = np.argsort(m_lens.data.tolist())[::-1].copy() + motions = motions[align_idx] + feats_rst = feats_rst[align_idx] + m_lens = m_lens[align_idx] + m_lens = torch.div(m_lens, + self.cfg.DATASET.HUMANML3D.UNIT_LEN, + rounding_mode="floor") + + assert self.motion_type in ['smplx_212', 'smplx_159'] + + + + recons_mov = self.t2m_moveencoder(feats_rst).detach() + recons_emb = self.t2m_motionencoder(recons_mov, m_lens) + motion_mov = self.t2m_moveencoder(motions).detach() + motion_emb = self.t2m_motionencoder(motion_mov, m_lens) + + # t2m text encoder + if self.cfg.model.eval_text_source == 'token': + text_emb = self.t2m_textencoder(word_embs, pos_ohot,text_lengths)[align_idx] + elif self.cfg.model.eval_text_source == 'only_text_token': + text_emb = self.t2m_textencoder(word_embs, text_lengths)[align_idx] + elif self.cfg.model.eval_text_source in ['caption']: + if self.cfg.model.eval_text_encode_way == 'clip': + raise NotImplementedError + + elif self.cfg.model.eval_text_encode_way == 't5': + raise NotImplementedError + + elif 'GRU' in self.cfg.model.eval_text_encode_way: + text_emb = self.t2m_textencoder(word_embs, text_lengths)[align_idx] + else: + raise NotImplementedError + if self.cfg.TRAIN.use_joints: + rs_set = { + "m_ref": motions, + "m_rst": feats_rst, + "lat_t": text_emb, + "lat_m": motion_emb, + "lat_rm": recons_emb, + "joints_ref": joints_ref, + "joints_rst": joints_rst, + } + else: + rs_set = { + "m_ref": motions, + "m_rst": feats_rst, + "lat_t": text_emb, + "lat_m": motion_emb, + "lat_rm": recons_emb, + } + + return rs_set + + + + + def t2m_eval_smplx_save_motion_token(self, batch): + # texts = batch["text"] + name = batch["name"] + motions = batch["motion"].detach().clone() + # lengths = batch["length"] + # word_embs = batch["word_embs"].detach().clone() + # pos_ohot = batch["pos_ohot"].detach().clone() + # text_lengths = batch["text_len"].detach().clone() + # start + start = time.time() + + + if self.stage in ['diffusion', 'vae_diffusion']: + # diffusion reverse + if 
self.do_classifier_free_guidance: + uncond_tokens = [""] * len(texts) + if self.condition == 'text': + uncond_tokens.extend(texts) + elif self.condition == 'text_uncond': + uncond_tokens.extend(uncond_tokens) + texts = uncond_tokens + text_emb = self.text_encoder(texts) + z = self._diffusion_reverse(text_emb, lengths) + elif self.stage in ['vae']: + if self.vae_type in ["mld", "vposert", "actor"]: + z, dist_m = self.vae.encode(motions, lengths) + elif self.vae_type in ["humanvq", "spatial_MLP_vqvae", "spatial_transformer_vqvae"]: + quants = self.vae.encode(motions) + else: + raise TypeError("Not supported vae type!") + if self.condition in ['text_uncond']: + # uncond random sample + z = torch.randn_like(z) + + with torch.no_grad(): + if self.vae_type in ["mld", "vposert", "actor"]: + feats_rst = self.vae.decode(z, lengths) + elif self.vae_type in ["humanvq"]: + feats_rst = self.vae.forward_decoder(quants) + feats_rst = feats_rst.reshape(motions.shape[0], motions.shape[1], -1) + elif self.vae_type == "no": + feats_rst = z.permute(1, 0, 2) + else: + raise TypeError("Not supported vae type!") + + # end time + end = time.time() + self.times.append(end - start) + # joints recover + if self.cfg.TRAIN.use_joints: + joints_rst = self.feats2joints(feats_rst, self.motion_type, self.smplx_model) + joints_ref = self.feats2joints(motions, self.motion_type, self.smplx_model) + + + ############# for save tokens ############# + feats_rst = self.renorm2ori(feats_rst) + motions = self.renorm2ori(motions) + feats_rst_path = os.path.join(f"./visualization/visualization/test_case/{self.cfg.TRAIN.DATASETS[0]}/{self.cfg.model.vae_type}_VAE_motionx_feats_rst_norm_back", name[0] + '.npy') + feats_ref_path = os.path.join(f"./visualization/visualization/test_case/{self.cfg.TRAIN.DATASETS[0]}/{self.cfg.model.vae_type}_VAE_motionx_feats_ref_norm_back", name[0] + '.npy') + + feats_rst_parent_directory = os.path.dirname(feats_rst_path) + if not os.path.exists(feats_rst_parent_directory): + os.makedirs(feats_rst_parent_directory) + + feats_ref_parent_directory = os.path.dirname(feats_ref_path) + if not os.path.exists(feats_ref_parent_directory): + os.makedirs(feats_ref_parent_directory) + + + np.save(feats_rst_path, feats_rst[0].detach().cpu().numpy()) + np.save(feats_ref_path, motions[0].detach().cpu().numpy()) + + + + assert self.motion_type in ['smplx_212', 'smplx_159'] + + + if self.cfg.TRAIN.use_joints: + rs_set = { + "m_ref": motions, + "m_rst": feats_rst, + # "lat_t": text_emb, + "joints_ref": joints_ref, + "joints_rst": joints_rst, + "motion_code": quants, + "name": name + } + else: + rs_set = { + "m_ref": motions, + "m_rst": feats_rst, + # "lat_t": text_emb, + "motion_code": quants, + "name": name + } + + return rs_set + + + def t2m_eval_smplx_text_all(self, batch): + assert self.condition == 'text_all' + texts = [] + for i in range(len(batch["text"])): + texts.append(batch["text"][i] +' ' + batch['face_text'][i] + ' ' + batch["body_text"][i] + ' ' + batch["hand_text"][i]) + + motions = batch["motion"].detach().clone() + lengths = batch["length"] + word_embs = batch["word_embs"].detach().clone() + pos_ohot = batch["pos_ohot"].detach().clone() + text_lengths = batch["text_len"].detach().clone() + # start + start = time.time() + + if self.trainer.datamodule.is_mm: + texts = texts * self.cfg.TEST.MM_NUM_REPEATS + motions = motions.repeat_interleave(self.cfg.TEST.MM_NUM_REPEATS, + dim=0) + lengths = lengths * self.cfg.TEST.MM_NUM_REPEATS + word_embs = word_embs.repeat_interleave( + self.cfg.TEST.MM_NUM_REPEATS, dim=0)
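# Note the two repeat styles in this multimodality block: Python lists are
# tiled as a whole (texts * R gives [a, b, a, b, ...]) while tensors use
# repeat_interleave, which repeats each row consecutively ([a, a, b, b, ...]).
# The two orderings only coincide for a single-sample batch, so this block
# implicitly assumes a batch size of 1 during the multimodality pass.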
+ pos_ohot = pos_ohot.repeat_interleave(self.cfg.TEST.MM_NUM_REPEATS, + dim=0) + text_lengths = text_lengths.repeat_interleave( + self.cfg.TEST.MM_NUM_REPEATS, dim=0) + + if self.stage in ['diffusion', 'vae_diffusion']: + # diffusion reverse + if self.do_classifier_free_guidance: + uncond_tokens = [""] * len(texts) + if self.condition == 'text_all': + uncond_tokens.extend(texts) + elif self.condition == 'text_uncond': + uncond_tokens.extend(uncond_tokens) + texts = uncond_tokens + text_emb = self.text_encoder(texts) + z = self._diffusion_reverse(text_emb, lengths) + elif self.stage in ['vae']: + if self.vae_type in ["mld", "vposert", "actor"]: + z, dist_m = self.vae.encode(motions, lengths) + else: + raise TypeError("Not supported vae type!") + if self.condition in ['text_uncond']: + # uncond random sample + z = torch.randn_like(z) + + with torch.no_grad(): + if self.vae_type in ["mld", "vposert", "actor"]: + feats_rst = self.vae.decode(z, lengths) + elif self.vae_type == "no": + feats_rst = z.permute(1, 0, 2) + + # end time + end = time.time() + self.times.append(end - start) + + # joints recover + if self.cfg.TRAIN.use_joints: + joints_rst = self.feats2joints(feats_rst, self.motion_type, self.smplx_model) + joints_ref = self.feats2joints(motions, self.motion_type, self.smplx_model) + + # renorm for t2m evaluators + feats_rst = self.datamodule.renorm4t2m(feats_rst) + motions = self.datamodule.renorm4t2m(motions) + # t2m motion encoder + m_lens = lengths.copy() + m_lens = torch.tensor(m_lens, device=motions.device) + align_idx = np.argsort(m_lens.data.tolist())[::-1].copy() + motions = motions[align_idx] + feats_rst = feats_rst[align_idx] + m_lens = m_lens[align_idx] + m_lens = torch.div(m_lens, + self.cfg.DATASET.HUMANML3D.UNIT_LEN, + rounding_mode="floor") + + assert self.motion_type == 'smplx_212' + + + + recons_mov = self.t2m_moveencoder(feats_rst).detach() + recons_emb = self.t2m_motionencoder(recons_mov, m_lens) + motion_mov = self.t2m_moveencoder(motions).detach() + motion_emb = self.t2m_motionencoder(motion_mov, m_lens) + + # t2m text encoder + if self.cfg.model.eval_text_source == 'token': + text_emb = self.t2m_textencoder(word_embs, pos_ohot,text_lengths)[align_idx] + elif self.cfg.model.eval_text_source == 'only_text_token': + text_emb = self.t2m_textencoder(word_embs, text_lengths)[align_idx] + elif self.cfg.model.eval_text_source in ['caption']: + if self.cfg.model.eval_text_encode_way == 'clip': + raise NotImplementedError + + elif self.cfg.model.eval_text_encode_way == 't5': + raise NotImplementedError + + elif 'GRU' in self.cfg.model.eval_text_encode_way: + text_emb = self.t2m_textencoder(word_embs, text_lengths)[align_idx] + else: + raise NotImplementedError + if self.cfg.TRAIN.use_joints: + rs_set = { + "m_ref": motions, + "m_rst": feats_rst, + "lat_t": text_emb, + "lat_m": motion_emb, + "lat_rm": recons_emb, + "joints_ref": joints_ref, + "joints_rst": joints_rst, + } + else: + rs_set = { + "m_ref": motions, + "m_rst": feats_rst, + "lat_t": text_emb, + "lat_m": motion_emb, + "lat_rm": recons_emb, + } + + + return rs_set + + + + def t2m_eval_smplx_text_face(self, batch): + assert self.condition == 'text_face' + texts = [] + for i in range(len(batch["text"])): + texts.append(batch["text"][i] +' ' + batch['face_text'][i]) + + motions = batch["motion"].detach().clone() + lengths = batch["length"] + word_embs = batch["word_embs"].detach().clone() + pos_ohot = batch["pos_ohot"].detach().clone() + text_lengths = batch["text_len"].detach().clone() + # start + start = time.time() 
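# Further down, feats_rst and motions are passed through
# self.datamodule.renorm4t2m before entering the evaluator encoders. The
# intent, inferred from the name and its use across these eval methods, is to
# map features from the dataset normalization used by the VAE into the
# normalization the pretrained t2m evaluator was trained with, so reference
# and reconstruction are compared in the same feature space. A sketch of what
# such a re-normalization typically looks like (statistic names here are
# illustrative, the real ones live in the datamodule):
# feats = feats * std_data + mean_data        # back to raw feature space
# feats = (feats - mean_eval) / std_eval      # into the evaluator's space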
+ + if self.trainer.datamodule.is_mm: + texts = texts * self.cfg.TEST.MM_NUM_REPEATS + motions = motions.repeat_interleave(self.cfg.TEST.MM_NUM_REPEATS, + dim=0) + lengths = lengths * self.cfg.TEST.MM_NUM_REPEATS + word_embs = word_embs.repeat_interleave( + self.cfg.TEST.MM_NUM_REPEATS, dim=0) + pos_ohot = pos_ohot.repeat_interleave(self.cfg.TEST.MM_NUM_REPEATS, + dim=0) + text_lengths = text_lengths.repeat_interleave( + self.cfg.TEST.MM_NUM_REPEATS, dim=0) + + if self.stage in ['diffusion', 'vae_diffusion']: + # diffusion reverse + if self.do_classifier_free_guidance: + uncond_tokens = [""] * len(texts) + if self.condition == 'text_face': + uncond_tokens.extend(texts) + elif self.condition == 'text_uncond': + uncond_tokens.extend(uncond_tokens) + texts = uncond_tokens + text_emb = self.text_encoder(texts) + z = self._diffusion_reverse(text_emb, lengths) + elif self.stage in ['vae']: + if self.vae_type in ["mld", "vposert", "actor"]: + z, dist_m = self.vae.encode(motions, lengths) + else: + raise TypeError("Not supported vae type!") + if self.condition in ['text_uncond']: + # uncond random sample + z = torch.randn_like(z) + + with torch.no_grad(): + if self.vae_type in ["mld", "vposert", "actor"]: + feats_rst = self.vae.decode(z, lengths) + elif self.vae_type == "no": + feats_rst = z.permute(1, 0, 2) + + # end time + end = time.time() + self.times.append(end - start) + + # joints recover + if self.cfg.TRAIN.use_joints: + joints_rst = self.feats2joints(feats_rst, self.motion_type, self.smplx_model) + joints_ref = self.feats2joints(motions, self.motion_type, self.smplx_model) + + # renorm for t2m evaluators + feats_rst = self.datamodule.renorm4t2m(feats_rst) + motions = self.datamodule.renorm4t2m(motions) + # t2m motion encoder + m_lens = lengths.copy() + m_lens = torch.tensor(m_lens, device=motions.device) + align_idx = np.argsort(m_lens.data.tolist())[::-1].copy() + motions = motions[align_idx] + feats_rst = feats_rst[align_idx] + m_lens = m_lens[align_idx] + m_lens = torch.div(m_lens, + self.cfg.DATASET.HUMANML3D.UNIT_LEN, + rounding_mode="floor") + + assert self.motion_type == 'smplx_212' + + + + recons_mov = self.t2m_moveencoder(feats_rst).detach() + recons_emb = self.t2m_motionencoder(recons_mov, m_lens) + motion_mov = self.t2m_moveencoder(motions).detach() + motion_emb = self.t2m_motionencoder(motion_mov, m_lens) + + # t2m text encoder + if self.cfg.model.eval_text_source == 'token': + text_emb = self.t2m_textencoder(word_embs, pos_ohot,text_lengths)[align_idx] + elif self.cfg.model.eval_text_source == 'only_text_token': + text_emb = self.t2m_textencoder(word_embs, text_lengths)[align_idx] + elif self.cfg.model.eval_text_source in ['caption']: + if self.cfg.model.eval_text_encode_way == 'clip': + raise NotImplementedError + + elif self.cfg.model.eval_text_encode_way == 't5': + raise NotImplementedError + + elif 'GRU' in self.cfg.model.eval_text_encode_way: + text_emb = self.t2m_textencoder(word_embs, text_lengths)[align_idx] + else: + raise NotImplementedError + if self.cfg.TRAIN.use_joints: + rs_set = { + "m_ref": motions, + "m_rst": feats_rst, + "lat_t": text_emb, + "lat_m": motion_emb, + "lat_rm": recons_emb, + "joints_ref": joints_ref, + "joints_rst": joints_rst, + } + else: + rs_set = { + "m_ref": motions, + "m_rst": feats_rst, + "lat_t": text_emb, + "lat_m": motion_emb, + "lat_rm": recons_emb, + } + + + return rs_set + + + + + + def t2m_eval_smplx_text_body(self, batch): + assert self.condition == 'text_body' + texts = [] + for i in range(len(batch["text"])): + 
texts.append(batch["text"][i] +' ' + batch['body_text'][i]) + + motions = batch["motion"].detach().clone() + lengths = batch["length"] + word_embs = batch["word_embs"].detach().clone() + pos_ohot = batch["pos_ohot"].detach().clone() + text_lengths = batch["text_len"].detach().clone() + # start + start = time.time() + + if self.trainer.datamodule.is_mm: + texts = texts * self.cfg.TEST.MM_NUM_REPEATS + motions = motions.repeat_interleave(self.cfg.TEST.MM_NUM_REPEATS, + dim=0) + lengths = lengths * self.cfg.TEST.MM_NUM_REPEATS + word_embs = word_embs.repeat_interleave( + self.cfg.TEST.MM_NUM_REPEATS, dim=0) + pos_ohot = pos_ohot.repeat_interleave(self.cfg.TEST.MM_NUM_REPEATS, + dim=0) + text_lengths = text_lengths.repeat_interleave( + self.cfg.TEST.MM_NUM_REPEATS, dim=0) + + if self.stage in ['diffusion', 'vae_diffusion']: + # diffusion reverse + if self.do_classifier_free_guidance: + uncond_tokens = [""] * len(texts) + if self.condition == 'text_body': + uncond_tokens.extend(texts) + elif self.condition == 'text_uncond': + uncond_tokens.extend(uncond_tokens) + else: + raise NotImplementedError + texts = uncond_tokens + text_emb = self.text_encoder(texts) + z = self._diffusion_reverse(text_emb, lengths) + elif self.stage in ['vae']: + if self.vae_type in ["mld", "vposert", "actor"]: + z, dist_m = self.vae.encode(motions, lengths) + else: + raise TypeError("Not supported vae type!") + if self.condition in ['text_uncond']: + # uncond random sample + z = torch.randn_like(z) + + with torch.no_grad(): + if self.vae_type in ["mld", "vposert", "actor"]: + feats_rst = self.vae.decode(z, lengths) + elif self.vae_type == "no": + feats_rst = z.permute(1, 0, 2) + + # end time + end = time.time() + self.times.append(end - start) + + # joints recover + if self.cfg.TRAIN.use_joints: + joints_rst = self.feats2joints(feats_rst, self.motion_type, self.smplx_model) + joints_ref = self.feats2joints(motions, self.motion_type, self.smplx_model) + + # renorm for t2m evaluators + feats_rst = self.datamodule.renorm4t2m(feats_rst) + motions = self.datamodule.renorm4t2m(motions) + # t2m motion encoder + m_lens = lengths.copy() + m_lens = torch.tensor(m_lens, device=motions.device) + align_idx = np.argsort(m_lens.data.tolist())[::-1].copy() + motions = motions[align_idx] + feats_rst = feats_rst[align_idx] + m_lens = m_lens[align_idx] + m_lens = torch.div(m_lens, + self.cfg.DATASET.HUMANML3D.UNIT_LEN, + rounding_mode="floor") + + assert self.motion_type == 'smplx_212' + + + + recons_mov = self.t2m_moveencoder(feats_rst).detach() + recons_emb = self.t2m_motionencoder(recons_mov, m_lens) + motion_mov = self.t2m_moveencoder(motions).detach() + motion_emb = self.t2m_motionencoder(motion_mov, m_lens) + + # t2m text encoder + if self.cfg.model.eval_text_source == 'token': + text_emb = self.t2m_textencoder(word_embs, pos_ohot,text_lengths)[align_idx] + elif self.cfg.model.eval_text_source == 'only_text_token': + text_emb = self.t2m_textencoder(word_embs, text_lengths)[align_idx] + elif self.cfg.model.eval_text_source in ['caption']: + if self.cfg.model.eval_text_encode_way == 'clip': + raise NotImplementedError + + elif self.cfg.model.eval_text_encode_way == 't5': + raise NotImplementedError + + elif 'GRU' in self.cfg.model.eval_text_encode_way: + text_emb = self.t2m_textencoder(word_embs, text_lengths)[align_idx] + else: + raise NotImplementedError + if self.cfg.TRAIN.use_joints: + rs_set = { + "m_ref": motions, + "m_rst": feats_rst, + "lat_t": text_emb, + "lat_m": motion_emb, + "lat_rm": recons_emb, + "joints_ref": 
joints_ref, + "joints_rst": joints_rst, + } + else: + rs_set = { + "m_ref": motions, + "m_rst": feats_rst, + "lat_t": text_emb, + "lat_m": motion_emb, + "lat_rm": recons_emb, + } + + + return rs_set + + + + + def t2m_eval_smplx_text_hand(self, batch): + assert self.condition == 'text_hand' + texts = [] + for i in range(len(batch["text"])): + texts.append(batch["text"][i] +' ' + batch['hand_text'][i]) + + motions = batch["motion"].detach().clone() + lengths = batch["length"] + word_embs = batch["word_embs"].detach().clone() + pos_ohot = batch["pos_ohot"].detach().clone() + text_lengths = batch["text_len"].detach().clone() + # start + start = time.time() + + if self.trainer.datamodule.is_mm: + texts = texts * self.cfg.TEST.MM_NUM_REPEATS + motions = motions.repeat_interleave(self.cfg.TEST.MM_NUM_REPEATS, + dim=0) + lengths = lengths * self.cfg.TEST.MM_NUM_REPEATS + word_embs = word_embs.repeat_interleave( + self.cfg.TEST.MM_NUM_REPEATS, dim=0) + pos_ohot = pos_ohot.repeat_interleave(self.cfg.TEST.MM_NUM_REPEATS, + dim=0) + text_lengths = text_lengths.repeat_interleave( + self.cfg.TEST.MM_NUM_REPEATS, dim=0) + + if self.stage in ['diffusion', 'vae_diffusion']: + # diffusion reverse + if self.do_classifier_free_guidance: + uncond_tokens = [""] * len(texts) + if self.condition == 'text_hand': + uncond_tokens.extend(texts) + elif self.condition == 'text_uncond': + uncond_tokens.extend(uncond_tokens) + else: + raise NotImplementedError + texts = uncond_tokens + text_emb = self.text_encoder(texts) + z = self._diffusion_reverse(text_emb, lengths) + elif self.stage in ['vae']: + if self.vae_type in ["mld", "vposert", "actor"]: + z, dist_m = self.vae.encode(motions, lengths) + else: + raise TypeError("Not supported vae type!") + if self.condition in ['text_uncond']: + # uncond random sample + z = torch.randn_like(z) + + with torch.no_grad(): + if self.vae_type in ["mld", "vposert", "actor"]: + feats_rst = self.vae.decode(z, lengths) + elif self.vae_type == "no": + feats_rst = z.permute(1, 0, 2) + + # end time + end = time.time() + self.times.append(end - start) + + # joints recover + if self.cfg.TRAIN.use_joints: + joints_rst = self.feats2joints(feats_rst, self.motion_type, self.smplx_model) + joints_ref = self.feats2joints(motions, self.motion_type, self.smplx_model) + + # renorm for t2m evaluators + feats_rst = self.datamodule.renorm4t2m(feats_rst) + motions = self.datamodule.renorm4t2m(motions) + # t2m motion encoder + m_lens = lengths.copy() + m_lens = torch.tensor(m_lens, device=motions.device) + align_idx = np.argsort(m_lens.data.tolist())[::-1].copy() + motions = motions[align_idx] + feats_rst = feats_rst[align_idx] + m_lens = m_lens[align_idx] + m_lens = torch.div(m_lens, + self.cfg.DATASET.HUMANML3D.UNIT_LEN, + rounding_mode="floor") + + assert self.motion_type == 'smplx_212' + + + + recons_mov = self.t2m_moveencoder(feats_rst).detach() + recons_emb = self.t2m_motionencoder(recons_mov, m_lens) + motion_mov = self.t2m_moveencoder(motions).detach() + motion_emb = self.t2m_motionencoder(motion_mov, m_lens) + + # t2m text encoder + if self.cfg.model.eval_text_source == 'token': + text_emb = self.t2m_textencoder(word_embs, pos_ohot,text_lengths)[align_idx] + elif self.cfg.model.eval_text_source == 'only_text_token': + text_emb = self.t2m_textencoder(word_embs, text_lengths)[align_idx] + elif self.cfg.model.eval_text_source in ['caption']: + if self.cfg.model.eval_text_encode_way == 'clip': + raise NotImplementedError + + elif self.cfg.model.eval_text_encode_way == 't5': + raise 
NotImplementedError + + elif 'GRU' in self.cfg.model.eval_text_encode_way: + text_emb = self.t2m_textencoder(word_embs, text_lengths)[align_idx] + else: + raise NotImplementedError + if self.cfg.TRAIN.use_joints: + rs_set = { + "m_ref": motions, + "m_rst": feats_rst, + "lat_t": text_emb, + "lat_m": motion_emb, + "lat_rm": recons_emb, + "joints_ref": joints_ref, + "joints_rst": joints_rst, + } + else: + rs_set = { + "m_ref": motions, + "m_rst": feats_rst, + "lat_t": text_emb, + "lat_m": motion_emb, + "lat_rm": recons_emb, + } + + + return rs_set + + + + def t2m_eval_smplx_text_face_body(self, batch): + assert self.condition == 'text_face_body' + texts = [] + for i in range(len(batch["text"])): + texts.append(batch["text"][i] +' ' + batch['face_text'][i] + ' ' + batch["body_text"][i]) + + motions = batch["motion"].detach().clone() + lengths = batch["length"] + word_embs = batch["word_embs"].detach().clone() + pos_ohot = batch["pos_ohot"].detach().clone() + text_lengths = batch["text_len"].detach().clone() + # start + start = time.time() + + if self.trainer.datamodule.is_mm: + texts = texts * self.cfg.TEST.MM_NUM_REPEATS + motions = motions.repeat_interleave(self.cfg.TEST.MM_NUM_REPEATS, + dim=0) + lengths = lengths * self.cfg.TEST.MM_NUM_REPEATS + word_embs = word_embs.repeat_interleave( + self.cfg.TEST.MM_NUM_REPEATS, dim=0) + pos_ohot = pos_ohot.repeat_interleave(self.cfg.TEST.MM_NUM_REPEATS, + dim=0) + text_lengths = text_lengths.repeat_interleave( + self.cfg.TEST.MM_NUM_REPEATS, dim=0) + + if self.stage in ['diffusion', 'vae_diffusion']: + # diffusion reverse + if self.do_classifier_free_guidance: + uncond_tokens = [""] * len(texts) + if self.condition == 'text_face_body': + uncond_tokens.extend(texts) + elif self.condition == 'text_uncond': + uncond_tokens.extend(uncond_tokens) + else: + raise NotImplementedError + texts = uncond_tokens + text_emb = self.text_encoder(texts) + z = self._diffusion_reverse(text_emb, lengths) + elif self.stage in ['vae']: + if self.vae_type in ["mld", "vposert", "actor"]: + z, dist_m = self.vae.encode(motions, lengths) + else: + raise TypeError("Not supported vae type!") + if self.condition in ['text_uncond']: + # uncond random sample + z = torch.randn_like(z) + + with torch.no_grad(): + if self.vae_type in ["mld", "vposert", "actor"]: + feats_rst = self.vae.decode(z, lengths) + elif self.vae_type == "no": + feats_rst = z.permute(1, 0, 2) + + # end time + end = time.time() + self.times.append(end - start) + + # joints recover + if self.cfg.TRAIN.use_joints: + joints_rst = self.feats2joints(feats_rst, self.motion_type, self.smplx_model) + joints_ref = self.feats2joints(motions, self.motion_type, self.smplx_model) + + # renorm for t2m evaluators + feats_rst = self.datamodule.renorm4t2m(feats_rst) + motions = self.datamodule.renorm4t2m(motions) + # t2m motion encoder + m_lens = lengths.copy() + m_lens = torch.tensor(m_lens, device=motions.device) + align_idx = np.argsort(m_lens.data.tolist())[::-1].copy() + motions = motions[align_idx] + feats_rst = feats_rst[align_idx] + m_lens = m_lens[align_idx] + m_lens = torch.div(m_lens, + self.cfg.DATASET.HUMANML3D.UNIT_LEN, + rounding_mode="floor") + + assert self.motion_type == 'smplx_212' + + + recons_mov = self.t2m_moveencoder(feats_rst).detach() + recons_emb = self.t2m_motionencoder(recons_mov, m_lens) + motion_mov = self.t2m_moveencoder(motions).detach() + motion_emb = self.t2m_motionencoder(motion_mov, m_lens) + + # t2m text encoder + if self.cfg.model.eval_text_source == 'token': + text_emb = 
self.t2m_textencoder(word_embs, pos_ohot, text_lengths)[align_idx] + elif self.cfg.model.eval_text_source == 'only_text_token': + text_emb = self.t2m_textencoder(word_embs, text_lengths)[align_idx] + elif self.cfg.model.eval_text_source in ['caption']: + if self.cfg.model.eval_text_encode_way == 'clip': + raise NotImplementedError + + elif self.cfg.model.eval_text_encode_way == 't5': + raise NotImplementedError + + elif 'GRU' in self.cfg.model.eval_text_encode_way: + text_emb = self.t2m_textencoder(word_embs, text_lengths)[align_idx] + else: + raise NotImplementedError + if self.cfg.TRAIN.use_joints: + rs_set = { + "m_ref": motions, + "m_rst": feats_rst, + "lat_t": text_emb, + "lat_m": motion_emb, + "lat_rm": recons_emb, + "joints_ref": joints_ref, + "joints_rst": joints_rst, + } + else: + rs_set = { + "m_ref": motions, + "m_rst": feats_rst, + "lat_t": text_emb, + "lat_m": motion_emb, + "lat_rm": recons_emb, + } + + + return rs_set + + + + def a2m_eval(self, batch): + actions = batch["action"] + actiontexts = batch["action_text"] + motions = batch["motion"].detach().clone() + lengths = batch["length"] + + if self.do_classifier_free_guidance: + cond_emb = torch.cat((torch.zeros_like(actions), actions)) + + if self.stage in ['diffusion', 'vae_diffusion']: + z = self._diffusion_reverse(cond_emb, lengths) + elif self.stage in ['vae']: + if self.vae_type in ["mld", "vposert","actor"]: + z, dist_m = self.vae.encode(motions, lengths) + else: + raise TypeError("vae_type must be mcross or actor") + + with torch.no_grad(): + if self.vae_type in ["mld", "vposert","actor"]: + feats_rst = self.vae.decode(z, lengths) + elif self.vae_type == "no": + feats_rst = z.permute(1, 0, 2) + else: + raise TypeError("vae_type must be mcross or actor or mld") + + mask = batch["mask"] + joints_rst = self.feats2joints(feats_rst, mask) + joints_ref = self.feats2joints(motions, mask) + joints_eval_rst = self.feats2joints_eval(feats_rst, mask) + joints_eval_ref = self.feats2joints_eval(motions, mask) + + rs_set = { + "m_action": actions, + "m_ref": motions, + "m_rst": feats_rst, + "m_lens": lengths, + "joints_rst": joints_rst, + "joints_ref": joints_ref, + "joints_eval_rst": joints_eval_rst, + "joints_eval_ref": joints_eval_ref, + } + return rs_set + + def a2m_gt(self, batch): + actions = batch["action"] + actiontexts = batch["action_text"] + motions = batch["motion"].detach().clone() + lengths = batch["length"] + mask = batch["mask"] + + joints_ref = self.feats2joints(motions.to('cuda'), mask.to('cuda')) + + rs_set = { + "m_action": actions, + "m_text": actiontexts, + "m_ref": motions, + "m_lens": lengths, + "joints_ref": joints_ref, + } + return rs_set + + def eval_gt(self, batch, renorm=True): + + motions = batch["motion"].detach().clone() + lengths = batch["length"] + + # feats_rst = self.datamodule.renorm4t2m(feats_rst) + if renorm: + motions = self.datamodule.renorm4t2m(motions) + + # t2m motion encoder + m_lens = lengths.copy() + m_lens = torch.tensor(m_lens, device=motions.device) + align_idx = np.argsort(m_lens.data.tolist())[::-1].copy() + motions = motions[align_idx] + m_lens = m_lens[align_idx] + m_lens = torch.div(m_lens, + self.cfg.DATASET.HUMANML3D.UNIT_LEN, + rounding_mode="floor") + + word_embs = batch["word_embs"].detach() + pos_ohot = batch["pos_ohot"].detach() + text_lengths = batch["text_len"].detach() + + motion_mov = self.t2m_moveencoder(motions[..., :-4]).detach() + motion_emb = self.t2m_motionencoder(motion_mov, m_lens) + + # t2m text encoder + text_emb = self.t2m_textencoder(word_embs, pos_ohot,
+ text_lengths)[align_idx] + + # joints recover + joints_ref = self.feats2joints(motions) + + rs_set = { + "m_ref": motions, + "lat_t": text_emb, + "lat_m": motion_emb, + "joints_ref": joints_ref, + } + return rs_set + + def allsplit_step(self, split: str, batch, batch_idx): + if split in ["train", "val"]: + if self.stage == "vae": + if self.vae_type in ["mld", "vposert","actor"]: + rs_set = self.train_vae_forward(batch) + rs_set["lat_t"] = rs_set["lat_m"] + else: + rs_set = self.train_vae_forward(batch) + elif self.stage == "diffusion": + rs_set = self.train_diffusion_forward(batch) + elif self.stage == "vae_diffusion": + vae_rs_set = self.train_vae_forward(batch) + diff_rs_set = self.train_diffusion_forward(batch) + t2m_rs_set = self.test_diffusion_forward(batch, + finetune_decoder=True) + # merge results + rs_set = { + **vae_rs_set, + **diff_rs_set, + "gen_m_rst": t2m_rs_set["m_rst"], + "gen_joints_rst": t2m_rs_set["joints_rst"], + "lat_t": t2m_rs_set["lat_t"], + } + + else: + raise ValueError(f"Unsupported stage {self.stage}!") + loss = self.losses[split].update(rs_set) + if loss is None: + raise ValueError( + "Loss is None, this happens with torchmetrics > 0.7") + + # Compute the metrics - currently evaluate results from text to motion + if split in ["val", "test"]: + if self.condition in ['text', 'text_uncond', 'text_all', 'text_face', 'text_body', 'text_hand', 'text_face_body', 'text_seperate', 'only_pose_concat', 'only_pose_fusion']: + # use t2m evaluators + if self.input_format in ['vector_263', 'root_body_pos_vel_hand_pos_vel']: + if self.condition == 'text': + if self.cfg.TEST.inference_vq_code: + rs_set = self.t2m_eval_save_motion_token(batch) + else: + if self.cfg.EVAL.use_tmr_eval: + rs_set = self.tmr_t2m_eval(batch) + else: + rs_set = self.t2m_eval(batch) + else: + raise NotImplementedError + elif self.input_format in ['smplx_212', 'smplx_159']: + if self.condition == 'text': + if self.cfg.TEST.inference_vq_code: + rs_set = self.t2m_eval_smplx_save_motion_token(batch) + else: + rs_set = self.t2m_eval_smplx(batch) + elif self.condition == 'text_all': + rs_set = self.t2m_eval_smplx_text_all(batch) + elif self.condition == 'text_face': + rs_set = self.t2m_eval_smplx_text_face(batch) + elif self.condition == 'text_body': + rs_set = self.t2m_eval_smplx_text_body(batch) + elif self.condition == 'text_hand': + rs_set = self.t2m_eval_smplx_text_hand(batch) + elif self.condition == 'text_face_body': + rs_set = self.t2m_eval_smplx_text_face_body(batch) + else: + raise NotImplementedError + # elif self.input_format in ['root_position', 'root_position_vel', 'root_position_rot6d', 'root_rot6d', 'all', 'root_body_pos_vel_hand_all', 'root_body_pos_vel_hand_pos_vel', 'root_body_pos_vel_hand_pos', 'root_body_pos_vel_hand_rot', 'root_position_vel_only_body', 'root_body_pos_vel_hand_pos_vel_hand_wrist']: + elif not self.eval_on_text: + rs_set = self.normal_eval(batch) + else: + rs_set = self.t2m_eval(batch) + # else: + # raise NotImplementedError + + elif self.condition == 'action': + # use a2m evaluators + rs_set = self.a2m_eval(batch) + else: + raise NotImplementedError + # MultiModality evaluation separately + if self.trainer.datamodule.is_mm: + metrics_dicts = ['MMMetrics'] + else: + metrics_dicts = self.metrics_dict + + # metrics_dicts = [] + for metric in metrics_dicts: + if metric == "TemosMetric": + phase = split if split != "val" else "eval" + if eval(f"self.cfg.{phase.upper()}.DATASETS")[0].lower( + ) not in [ + "humanml3d", + "kit", + "motionx", + "motionx_v25", + 'motionx_v26'
+ ]: + raise TypeError( + "APE and AVE metrics only support humanml3d, kit and motionx datasets now" + ) + getattr(self, metric).update(rs_set["joints_rst"], + rs_set["joints_ref"], + batch["length"]) + + elif metric == "TemosMetric_body_hand": + phase = split if split != "val" else "eval" + if eval(f"self.cfg.{phase.upper()}.DATASETS")[0].lower( + ) not in [ + "humanml3d", + "kit", + "motionx", + "motionx_v25", + 'motionx_v26' + ]: + raise TypeError( + "APE and AVE metrics only support humanml3d, kit and motionx datasets now" + ) + getattr(self, metric).update(rs_set["joints_rst"], + rs_set["joints_ref"], + batch["length"]) + + elif metric == "TM2TMetrics": + getattr(self, metric).update( + # lat_t, latent encoded from diffusion-based text + # lat_rm, latent encoded from reconstructed motion + # lat_m, latent encoded from gt motion + # rs_set['lat_t'], rs_set['lat_rm'], rs_set['lat_m'], batch["length"]) + rs_set["lat_t"], + rs_set["lat_rm"], + rs_set["lat_m"], + batch["length"], + ) + elif metric == "TM2TMetrics_R256": + getattr(self, metric).update( + # lat_t, latent encoded from diffusion-based text + # lat_rm, latent encoded from reconstructed motion + # lat_m, latent encoded from gt motion + # rs_set['lat_t'], rs_set['lat_rm'], rs_set['lat_m'], batch["length"]) + rs_set["lat_t"], + rs_set["lat_rm"], + rs_set["lat_m"], + batch["length"], + ) + elif metric == "TMR_TM2TMetrics": + getattr(self, metric).update( + # lat_t, latent encoded from diffusion-based text + # lat_rm, latent encoded from reconstructed motion + # lat_m, latent encoded from gt motion + # rs_set['lat_t'], rs_set['lat_rm'], rs_set['lat_m'], batch["length"]) + rs_set["lat_t_tmr"], + rs_set["lat_rm_tmr"], + rs_set["lat_m_tmr"], + batch["length"], + ) + elif metric == "UncondMetrics": + getattr(self, metric).update( + recmotion_embeddings=rs_set["lat_rm"], + gtmotion_embeddings=rs_set["lat_m"], + lengths=batch["length"], + ) + elif metric in ["MRMetrics", "MRMetrics_body_hand"]: + if self.cfg.TEST.inference_vq_code: + getattr(self, metric).update(rs_set["joints_rst"], + rs_set["joints_ref"], + batch["length"], + rs_set["name"]) + else: + getattr(self, metric).update(rs_set["joints_rst"], + rs_set["joints_ref"], + batch["length"]) + + elif metric == "MMMetrics": + getattr(self, metric).update(rs_set["lat_rm"].unsqueeze(0), + batch["length"]) + elif metric == "HUMANACTMetrics": + getattr(self, metric).update(rs_set["m_action"], + rs_set["joints_eval_rst"], + rs_set["joints_eval_ref"], + rs_set["m_lens"]) + elif metric == "UESTCMetrics": + # the stgcn model expects rotations only + getattr(self, metric).update( + rs_set["m_action"], + rs_set["m_rst"].view(*rs_set["m_rst"].shape[:-1], 6, + 25).permute(0, 3, 2, 1)[:, :-1], + rs_set["m_ref"].view(*rs_set["m_ref"].shape[:-1], 6, + 25).permute(0, 3, 2, 1)[:, :-1], + rs_set["m_lens"]) + else: + raise TypeError(f"Unsupported metric {metric}") + + # return forward output rather than loss during test + # self.datamodule.renorm4t2m + if split in ["test"]: + if self.cfg.TEST.inference_vq_code: + if self.vae_type in ["hvq", "hvq_body_hand"]: + return rs_set["motion_code_t"], rs_set["motion_code_b"], batch["name"] + else: + return rs_set["motion_code"], batch["name"] + + if self.motion_type == 'vector_263': + return rs_set["joints_rst"], batch["length"] + elif self.motion_type in ['smplx_212', 'smplx_159']: + if self.cfg.TRAIN.use_joints: + return rs_set["m_rst"], batch["length"], rs_set["m_ref"] + else: + return batch["length"] + elif self.motion_type in ['ric_rot']: + return rs_set["joints_rst"],
batch["length"], rs_set["joints_ref"] + + else: + return batch["length"] + return loss diff --git a/Evaluator_272/mld/models/modeltype/temos.py b/Evaluator_272/mld/models/modeltype/temos.py new file mode 100644 index 0000000000000000000000000000000000000000..686de3c1fec9f67dbcb3400b4f84f00a2e44cab9 --- /dev/null +++ b/Evaluator_272/mld/models/modeltype/temos.py @@ -0,0 +1,662 @@ +from typing import List, Optional + +import torch +from torch import Tensor +from omegaconf import DictConfig +from mld.models.tools.tools import remove_padding + +from mld.models.metrics import ComputeMetrics +from torchmetrics import MetricCollection +from mld.models.modeltype.base import BaseModel +from torch.distributions.distribution import Distribution +from mld.config import instantiate_from_config + +from mld.models.losses.temos import TemosLosses +from torch.optim import AdamW +from sentence_transformers import SentenceTransformer + +from mld.models.architectures import t2m_textenc, t2m_motionenc +import os + +import time + +import numpy as np +import torch.nn.functional as f +from pathlib import Path + +class TEMOS(BaseModel): + def __init__(self, cfg, datamodule, **kwargs): + super().__init__() + + self.is_vae = cfg.model.vae + self.cfg = cfg + self.condition = cfg.model.condition + self.stage = cfg.TRAIN.STAGE + self.datamodule = datamodule + self.njoints = cfg.DATASET.NJOINTS + self.debug = cfg.DEBUG + self.motion_type = cfg.DATASET.MOTION_TYPE + + self.textencoder = instantiate_from_config(cfg.textencoder) + self.motionencoder = instantiate_from_config(cfg.motionencoder) + self.motiondecoder = instantiate_from_config(cfg.motiondecoder) + + + if self.condition in ["text", "text_uncond", 'text_all', 'text_face', 'text_body', 'text_hand', 'text_face_body', 'text_seperate', 'only_pose_concat', 'only_pose_fusion']: + self.feats2joints = datamodule.feats2joints + + if cfg.TRAIN.OPTIM.TYPE.lower() == "adamw": + self.optimizer = AdamW(lr=cfg.TRAIN.OPTIM.LR, + params=self.parameters()) + else: + raise NotImplementedError( + "Only AdamW is supported for now.") + + + self._losses = MetricCollection({ + split: TemosLosses(vae=self.is_vae, mode="xyz", cfg=cfg) + for split in ["losses_train", "losses_test", "losses_val"] + }) + + self.losses = {key: self._losses["losses_" + key] for key in ["train", "test", "val"]} + + self.metrics_dict = cfg.METRIC.TYPE + self.configure_metrics() + + # If we want to override it at testing time + self.sample_mean = False + self.fact = None + + if self.cfg.LOSS.USE_INFONCE_FILTER: + self.filter_model = SentenceTransformer('sentence-transformers/paraphrase-MiniLM-L6-v2') + + self.retrieval_text_embedding = [] + self.retrieval_motion_embedding = [] + self.retrieval_sbert_embedding = [] + + self.retrieval_corres_name = [] + + self.gt_idx = 0 + + self.__post_init__() + + # Forward: text => motion + def forward(self, batch: dict) -> List[Tensor]: + datastruct_from_text = self.text_to_motion_forward(batch["text"], + batch["length"]) + + return remove_padding(datastruct_from_text.joints, batch["length"]) + + + def _get_t2m_evaluator(self, cfg): + """ + load T2M text encoder and motion encoder for evaluating + """ + + # init module + if cfg.model.eval_text_source == 'token': + + self.t2m_textencoder = t2m_textenc.TextEncoderBiGRUCo(word_size=cfg.model.t2m_textencoder.dim_word, + pos_size=cfg.model.t2m_textencoder.dim_pos_ohot, + hidden_size=cfg.model.t2m_textencoder.dim_text_hidden, + output_size=cfg.model.t2m_textencoder.dim_coemb_hidden, + ) + elif cfg.model.eval_text_source ==
'only_text_token': + + self.t2m_textencoder = t2m_textenc.TextEncoderBiGRUCoV2(word_size=cfg.model.t2m_textencoder.dim_word, + hidden_size=cfg.model.t2m_textencoder.dim_text_hidden, + output_size=cfg.model.t2m_textencoder.dim_coemb_hidden, + ) + + elif cfg.model.eval_text_source in ['caption']: + + if cfg.model.eval_text_encode_way == 'clip': + import clip # (editor) `clip` was not imported in this file; loaded lazily here as an optional dependency + # (editor) `opt` and `text_enc` were undefined in the original; loading on CPU and referencing + # the module attribute directly is an assumption (Lightning moves modules to the right device later) + self.t2m_textencoder, clip_preprocess = clip.load("ViT-B/32", device='cpu', jit=False) # Must set jit=False for training + clip.model.convert_weights(self.t2m_textencoder) # Actually this line is unnecessary since clip is already on float16 by default + self.t2m_textencoder.eval() + for p in self.t2m_textencoder.parameters(): + p.requires_grad = False + + elif cfg.model.eval_text_encode_way == 't5': + os.environ["TOKENIZERS_PARALLELISM"] = "false" + self.t2m_textencoder = SentenceTransformer('sentence-transformers/sentence-t5-xl') # (editor) `opt` was undefined; device placement left to Lightning + self.t2m_textencoder.eval() + for p in self.t2m_textencoder.parameters(): + p.requires_grad = False + + elif 'GRU' in cfg.model.eval_text_encode_way: + self.t2m_textencoder = t2m_textenc.TextEncoderBiGRUCoV2(word_size=cfg.model.t2m_textencoder.dim_word, + hidden_size=cfg.model.t2m_textencoder.dim_text_hidden, + output_size=cfg.model.t2m_textencoder.dim_coemb_hidden, + ) + else: + raise NotImplementedError + + + + self.t2m_moveencoder = t2m_motionenc.MovementConvEncoder( + input_size=cfg.DATASET.NFEATS - 4, + hidden_size=cfg.model.t2m_motionencoder.dim_move_hidden, + output_size=cfg.model.t2m_motionencoder.dim_move_latent, + ) + + + self.t2m_motionencoder = t2m_motionenc.MotionEncoderBiGRUCo( + input_size=cfg.model.t2m_motionencoder.dim_move_latent, + hidden_size=cfg.model.t2m_motionencoder.dim_motion_hidden, + output_size=cfg.model.t2m_motionencoder.dim_motion_latent, + ) + + # load pretrained + dataname = cfg.TEST.DATASETS[0] + + t2m_checkpoint = torch.load( + os.path.join(cfg.model.t2m_path, dataname, + "text_mot_match/model/finest.tar"), map_location=torch.device('cpu')) + + self.t2m_textencoder.load_state_dict(t2m_checkpoint["text_encoder"]) + + self.t2m_moveencoder.load_state_dict( + t2m_checkpoint["movement_encoder"]) + + + self.t2m_motionencoder.load_state_dict( + t2m_checkpoint["motion_encoder"]) + + # freeze params + self.t2m_textencoder.eval() + self.t2m_moveencoder.eval() + self.t2m_motionencoder.eval() + for p in self.t2m_textencoder.parameters(): + p.requires_grad = False + for p in self.t2m_moveencoder.parameters(): + p.requires_grad = False + for p in self.t2m_motionencoder.parameters(): + p.requires_grad = False + + + + def sample_from_distribution(self, distribution: Distribution, *, + fact: Optional[float] = None, + sample_mean: Optional[bool] = False) -> Tensor: + fact = fact if fact is not None else self.fact + sample_mean = sample_mean if sample_mean is not None else self.sample_mean + + if sample_mean: + return distribution.loc + + # Reparameterization trick + if fact is None: + return distribution.rsample() + + # Rescale the eps + eps = distribution.rsample() - distribution.loc + latent_vector = distribution.loc + fact * eps + return latent_vector
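+    # (editor's note) A worked sketch of the rescaled reparameterization above, with
+    # illustrative numbers: for dist = Normal(loc=0, scale=1) and fact = 0.5,
+    #   eps = dist.rsample() - dist.loc    # unit noise, gradients flow through rsample()
+    #   z   = dist.loc + 0.5 * eps         # distributed as N(0, 0.25)
+    # i.e. fact < 1 shrinks the sampled std by fact (variance by fact**2); fact=None falls
+    # back to plain rsample(), and sample_mean=True returns the mean `loc` directly.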
+ def text_to_motion_forward(self, text_sentences: List[str], lengths: List[int], *, + return_latent: bool = False): + # Encode the text to the latent space + if self.is_vae: + distribution = self.textencoder(text_sentences) + latent_vector = self.sample_from_distribution(distribution) + else: + distribution = None + latent_vector = self.textencoder(text_sentences) + + # Decode the latent vector to a motion + features = self.motiondecoder(latent_vector, lengths) + # datastruct = self.Datastruct(features=features) + + if not return_latent: + return features + return features, latent_vector, distribution + + def motion_to_motion_forward(self, features, + lengths: Optional[List[int]] = None, + return_latent: bool = False + ): + if self.is_vae: + distribution = self.motionencoder(features, lengths) + latent_vector = self.sample_from_distribution(distribution) + else: + distribution = None + latent_vector: Tensor = self.motionencoder(features, lengths) + + # Decode the latent vector to a motion + features = self.motiondecoder(latent_vector, lengths) + # datastruct = self.Datastruct(features=features) + + if not return_latent: + return features + return features, latent_vector, distribution + + + def save_embeddings(self, batch): + + with torch.no_grad(): + motion_all, text_all = None, None + sbert_embedding_all = None + + texts = batch["text"] + motions = batch["motion"].detach().clone() + lengths = batch["length"] + word_embs = batch["word_embs"].detach().clone() + pos_ohot = batch["pos_ohot"].detach().clone() + text_lengths = batch["text_len"].detach().clone() + retrieval_name = batch['retrieval_name'] + + text_embedding = self.textencoder(texts).loc + motion_embedding = self.motionencoder(motions, lengths).loc + + Emb_text = f.normalize(text_embedding, dim=1) + Emb_motion = f.normalize(motion_embedding, dim=1) + + if text_all is None: + text_all = Emb_text + else: + text_all = torch.cat((text_all, Emb_text), 0) + + if motion_all is None: + motion_all = Emb_motion + else: + motion_all = torch.cat((motion_all, Emb_motion), 0) + + if self.cfg.LOSS.USE_INFONCE_FILTER: + sbert_embedding = torch.tensor(self.filter_model.encode(texts)) # (bs, 384) + sbert_embedding = f.normalize(sbert_embedding, dim=1) + + if sbert_embedding_all is None: + sbert_embedding_all = sbert_embedding + else: + sbert_embedding_all = torch.cat((sbert_embedding_all, sbert_embedding), 0) + + self.retrieval_sbert_embedding.append(sbert_embedding_all.detach().cpu().numpy()) + + self.retrieval_text_embedding.append(text_all.detach().cpu().numpy()) + self.retrieval_motion_embedding.append(motion_all.detach().cpu().numpy()) + self.retrieval_corres_name.append(retrieval_name) + + + + def t2m_eval(self, batch): + retrieval_name = batch['retrieval_name'] + texts = batch["text"] + motions = batch["motion"].detach().clone() + lengths = batch["length"] + word_embs = batch["word_embs"].detach().clone() + pos_ohot = batch["pos_ohot"].detach().clone() + text_lengths = batch["text_len"].detach().clone() + + # start + start = time.time() + + if self.trainer.datamodule.is_mm: + texts = texts * self.cfg.TEST.MM_NUM_REPEATS + motions = motions.repeat_interleave(self.cfg.TEST.MM_NUM_REPEATS, + dim=0) + lengths = lengths * self.cfg.TEST.MM_NUM_REPEATS + word_embs = word_embs.repeat_interleave( + self.cfg.TEST.MM_NUM_REPEATS, dim=0) + pos_ohot = pos_ohot.repeat_interleave(self.cfg.TEST.MM_NUM_REPEATS, + dim=0) + text_lengths = text_lengths.repeat_interleave( + self.cfg.TEST.MM_NUM_REPEATS, dim=0) + + assert self.stage in ['temos'] + + # Encode the text/decode to a motion + with torch.no_grad(): + ret = self.text_to_motion_forward(texts, + lengths, + return_latent=True) + feat_from_text, latent_from_text, distribution_from_text = ret + + # Encode the motion/decode to a motion + ret = self.motion_to_motion_forward(motions, + lengths, + return_latent=True) + feat_from_motion, latent_from_motion, distribution_from_motion = ret + + # end time + end = time.time() + self.times.append(end - start)
+ # joints recover + joints_ref = self.feats2joints(motions) + joints_rst = self.feats2joints(feat_from_text) + + # renorm for t2m evaluators + feats_rst = self.datamodule.renorm4t2m(feat_from_text) + motions = self.datamodule.renorm4t2m(motions) + # t2m motion encoder + m_lens = lengths.copy() + m_lens = torch.tensor(m_lens, device=motions.device) + align_idx = np.argsort(m_lens.data.tolist())[::-1].copy() + motions = motions[align_idx] + feats_rst = feats_rst[align_idx] + m_lens = m_lens[align_idx] + m_lens = torch.div(m_lens, + self.cfg.DATASET.HUMANML3D.UNIT_LEN, + rounding_mode="floor") + + + rs_set = { + "m_ref": motions, + "m_rst": feats_rst, + # "lat_t": text_emb, + # "lat_m": motion_emb, + # "lat_rm": recons_emb, + "joints_ref": joints_ref, + "joints_rst": joints_rst, + } + + return rs_set + + + def tmr_gt_eval(self, batch): + texts = batch["text"] + motions = batch["motion"].detach().clone() + lengths = batch["length"] + # word_embs = batch["word_embs"].detach().clone() + # pos_ohot = batch["pos_ohot"].detach().clone() + # text_lengths = batch["text_len"].detach().clone() + name = batch["retrieval_name"] + bs, seq = motions.shape[:2] + + # start + start = time.time() + + if self.trainer.datamodule.is_mm: + texts = texts * self.cfg.TEST.MM_NUM_REPEATS + motions = motions.repeat_interleave(self.cfg.TEST.MM_NUM_REPEATS, + dim=0) + lengths = lengths * self.cfg.TEST.MM_NUM_REPEATS + # (editor) word_embs / pos_ohot / text_lengths are commented out above and unused in + # this method, so the original repeat_interleave calls on them (a NameError in MM mode) + # were dropped. + + bs = self.cfg.TEST.MM_NUM_REPEATS + + assert self.stage in ['temos'] + self.textencoder.eval() + self.motionencoder.eval() + self.motiondecoder.eval() + with torch.no_grad(): + + ret = self.text_to_motion_forward(texts, + lengths, + return_latent=True) + feat_from_text, latent_from_text, distribution_from_text = ret + # Encode the motion/decode to a motion + ret = self.motion_to_motion_forward(motions, + lengths, + return_latent=True) + feat_from_motion, latent_from_motion, distribution_from_motion = ret + + ret = self.motion_to_motion_forward(feat_from_motion, lengths, return_latent=True) + _, latent_from_motion_rst_motion, _ = ret + + # end time + end = time.time() + self.times.append(end - start) + # joints recover + joints_ref = self.feats2joints(motions) + joints_rst = self.feats2joints(feat_from_text) + + + # #########################saving output################### + feats_rst = self.datamodule.renorm4t2m(feat_from_text) + motions = self.datamodule.renorm4t2m(motions) + # t2m motion encoder + m_lens = lengths.copy() + m_lens = torch.tensor(m_lens, device=motions.device) + align_idx = np.argsort(m_lens.data.tolist())[::-1].copy() + motions = motions[align_idx] + feats_rst = feats_rst[align_idx] + m_lens = m_lens[align_idx] + m_lens = torch.div(m_lens, + self.cfg.DATASET.HUMANML3D_272.UNIT_LEN, + rounding_mode="floor") + + recons_emb_tmr = latent_from_motion_rst_motion[align_idx] + motion_emb_tmr = latent_from_motion[align_idx] + text_emb_tmr = latent_from_text[align_idx] + + self.textencoder.train() + self.motionencoder.train() + self.motiondecoder.train() + + rs_set = { + "m_ref": motions, + "lat_t_tmr": text_emb_tmr, + "lat_m_tmr": motion_emb_tmr, + "lat_rm_tmr": recons_emb_tmr, + "joints_ref": joints_ref, + "joints_rst": joints_rst, + } + return rs_set
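+    # (editor's note) In allsplit_step below, `emb_dist` is the pairwise cosine-similarity
+    # matrix of SBERT caption embeddings; with USE_INFONCE_FILTER it is handed to the
+    # TemosLosses InfoNCE term, which can use it to discount negatives whose captions are
+    # near-paraphrases (the thresholding itself lives in the loss, not in this file).
+    # Minimal sketch with illustrative shapes (bs=4, SBERT dim 384):
+    #   emb = f.normalize(torch.randn(4, 384), p=2, dim=1)
+    #   sim = emb.matmul(emb.T)    # (4, 4), ones on the diagonal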
+ def allsplit_step(self, split: str, batch, batch_idx): + emb_dist = None + if self.cfg.LOSS.USE_INFONCE and self.cfg.LOSS.USE_INFONCE_FILTER: + with torch.no_grad(): + text_embedding = self.filter_model.encode(batch["text"]) + text_embedding = torch.tensor(text_embedding).to(batch['motion'][0]) + normalized = f.normalize(text_embedding, p=2, dim=1) + emb_dist = normalized.matmul(normalized.T) + + # Encode the text/decode to a motion + ret = self.text_to_motion_forward(batch["text"], + batch["length"], + return_latent=True) + feat_from_text, latent_from_text, distribution_from_text = ret + + # Encode the motion/decode to a motion + ret = self.motion_to_motion_forward(batch["motion"], + batch["length"], + return_latent=True) + feat_from_motion, latent_from_motion, distribution_from_motion = ret + + # GT data + # datastruct_ref = batch["datastruct"] + + # Compare to a Normal distribution + if self.is_vae: + # Create a centred normal distribution to compare with + mu_ref = torch.zeros_like(distribution_from_text.loc) + scale_ref = torch.ones_like(distribution_from_text.scale) + distribution_ref = torch.distributions.Normal(mu_ref, scale_ref) + else: + distribution_ref = None + # Compute the losses + loss = self.losses[split].update(f_text=feat_from_text, + f_motion=feat_from_motion, + f_ref=batch["motion"], + lat_text=latent_from_text, + lat_motion=latent_from_motion, + dis_text=distribution_from_text, + dis_motion=distribution_from_motion, + dis_ref=distribution_ref, + emb_dist=emb_dist) + + if loss is None: + raise ValueError("Loss is None, this happens with torchmetrics > 0.7") + + + if split in ["val", "test"]: + # self.save_embeddings(batch) + if self.cfg.EVAL.eval_self_on_gt: + rs_set = self.tmr_gt_eval(batch) + else: + if self.condition in ['text', 'text_uncond', 'text_all', 'text_face', 'text_body', 'text_hand', 'text_face_body', 'text_seperate', 'only_pose_concat', 'only_pose_fusion']: + # use t2m evaluators + rs_set = self.t2m_eval(batch) + elif self.condition == 'action': + # use a2m evaluators + rs_set = self.a2m_eval(batch) + else: + raise NotImplementedError + + # MultiModality evaluation separately + if self.trainer.datamodule.is_mm: + metrics_dicts = ['MMMetrics'] + else: + metrics_dicts = self.metrics_dict + + for metric in metrics_dicts: + if metric == "TemosMetric": + phase = split if split != "val" else "eval" + if eval(f"self.cfg.{phase.upper()}.DATASETS")[0].lower( + ) not in [ + "humanml3d", + "kit" + ]: + raise TypeError( + "APE and AVE metrics only support humanml3d and kit datasets now" + ) + + getattr(self, metric).update(rs_set["joints_rst"], + rs_set["joints_ref"], + batch["length"]) + elif metric == "TM2TMetrics": + getattr(self, metric).update( + rs_set['lat_t'], + rs_set["lat_rm"], + rs_set["lat_m"], + batch["length"], + ) + elif metric == "UncondMetrics": + getattr(self, metric).update( + recmotion_embeddings=rs_set["lat_rm"], + gtmotion_embeddings=rs_set["lat_m"], + lengths=batch["length"], + ) + elif metric == "MRMetrics": + getattr(self, metric).update(rs_set["joints_rst"], + rs_set["joints_ref"], + batch["length"]) + elif metric == "MMMetrics": + getattr(self, metric).update(rs_set["lat_rm"].unsqueeze(0), + batch["length"]) + elif metric == "HUMANACTMetrics": + getattr(self, metric).update(rs_set["m_action"], + rs_set["joints_eval_rst"], + rs_set["joints_eval_ref"], + rs_set["m_lens"]) + elif metric == "TMR_TM2TMetrics": + getattr(self, metric).update( + rs_set["lat_t_tmr"], + rs_set["lat_rm_tmr"], + rs_set["lat_m_tmr"], + batch["length"], + ) + elif metric == "UESTCMetrics": + # the stgcn model expects rotations only + getattr(self,
metric).update( + rs_set["m_action"], + rs_set["m_rst"].view(*rs_set["m_rst"].shape[:-1], 6, + 25).permute(0, 3, 2, 1)[:, :-1], + rs_set["m_ref"].view(*rs_set["m_ref"].shape[:-1], 6, + 25).permute(0, 3, 2, 1)[:, :-1], + rs_set["m_lens"]) + else: + raise TypeError(f"Not support this metric {metric}") + + + if split in ["test"]: + if self.motion_type == 'vector_263': + return rs_set["joints_rst"], batch["length"], batch["text"] + elif self.motion_type == 'smplx_212': + if self.cfg.TRAIN.use_joints: + return rs_set["m_rst"], batch["length"], rs_set["m_ref"] + else: + return batch["length"] + + return loss + + + def allsplit_epoch_end(self, split: str, outputs): + dico = {} + + if split in ["val", "test"]: + + if (self.trainer.current_epoch+1) % 1000 == 0: + output_dir = Path( + os.path.join( + self.cfg.FOLDER, + str(self.cfg.model.model_type), + str(self.cfg.NAME), + "embeddings", + split, + "epoch_" + str(self.trainer.current_epoch) + )) + + os.makedirs(output_dir, exist_ok=True) + + self.retrieval_text_embedding = torch.cat([i.view(-1, i.shape[-1]) for i in self.all_gather(self.retrieval_text_embedding)], dim=0) + self.retrieval_motion_embedding = torch.cat([i.view(-1, i.shape[-1]) for i in self.all_gather(self.retrieval_motion_embedding)], dim=0) + + + tmp_retrieval_name = [] + for i in self.all_gather(self.retrieval_corres_name): + tmp_retrieval_name += i + self.retrieval_corres_name = tmp_retrieval_name + with open(output_dir/"test_name_debug.txt", "w") as test_name_file: + for i in self.retrieval_corres_name: + test_name_file.write(i + '\n') + + if self.cfg.LOSS.USE_INFONCE_FILTER: + self.retrieval_sbert_embedding = torch.cat([i.view(-1, i.shape[-1]) for i in self.all_gather(self.retrieval_sbert_embedding)], dim=0) + np.save(output_dir/"sbert_embedding.npy", self.retrieval_sbert_embedding.detach().cpu().numpy()) + + + np.save(output_dir/"text_embedding.npy", self.retrieval_text_embedding.detach().cpu().numpy())# (2324, 256) + np.save(output_dir/"motion_embedding.npy", self.retrieval_motion_embedding.detach().cpu().numpy()) + + print('save embedding in {} at {}'.format(output_dir, self.trainer.current_epoch)) + + + self.retrieval_text_embedding = [] + self.retrieval_motion_embedding = [] + self.retrieval_sbert_embedding = [] + + if split in ["train", "val"]: + losses = self.losses[split] + loss_dict = losses.compute(split) + losses.reset() + dico.update({ + losses.loss2logname(loss, split): value.item() + for loss, value in loss_dict.items() if not torch.isnan(value) + }) + + if split in ["val", "test"]: + + if self.trainer.datamodule.is_mm and "TM2TMetrics" in self.metrics_dict: + metrics_dicts = ['MMMetrics'] + else: + metrics_dicts = self.metrics_dict + for metric in metrics_dicts: + metrics_dict = getattr( + self, + metric).compute(sanity_flag=self.trainer.sanity_checking) + # reset metrics + getattr(self, metric).reset() + dico.update({ + f"Metrics/{metric}": value.item() + for metric, value in metrics_dict.items() + }) + if split != "test": + dico.update({ + "epoch": float(self.trainer.current_epoch), + "step": float(self.trainer.current_epoch), + }) + # don't write sanity check into log + if not self.trainer.sanity_checking: + self.log_dict(dico, sync_dist=True, rank_zero_only=True) + + def training_epoch_end(self, outputs): + return self.allsplit_epoch_end("train", outputs) diff --git a/Evaluator_272/mld/models/operator/__init__.py b/Evaluator_272/mld/models/operator/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..1864a99ad508dba7501923a5e46c0b8f80c35473 --- /dev/null +++ b/Evaluator_272/mld/models/operator/__init__.py @@ -0,0 +1,4 @@ +from .adain import AdaptiveInstanceNorm1d +from .blocks import ConvBlock, LinearBlock +from .position_encoding_layer import PositionalEncoding + diff --git a/Evaluator_272/mld/models/operator/adain.py b/Evaluator_272/mld/models/operator/adain.py new file mode 100644 index 0000000000000000000000000000000000000000..3588f33e19fa3434ee2801f941c40566923abf41 --- /dev/null +++ b/Evaluator_272/mld/models/operator/adain.py @@ -0,0 +1,66 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +class AdaptiveInstanceNorm1d(nn.Module): + def __init__(self, num_features, eps=1e-5, momentum=0.1): + super(AdaptiveInstanceNorm1d, self).__init__() + self.num_features = num_features + self.eps = eps + self.momentum = momentum + self.weight = None + self.bias = None + self.register_buffer('running_mean', torch.zeros(num_features)) + self.register_buffer('running_var', torch.ones(num_features)) + + def forward(self, x, direct_weighting=False, no_std=False): + assert self.weight is not None and \ + self.bias is not None, "Please assign AdaIN weight first" + # (bs, nfeats, nframe) <= (nframe, bs, nfeats) + x = x.permute(1,2,0) + + b, c = x.size(0), x.size(1) # batch size & channels + running_mean = self.running_mean.repeat(b) + running_var = self.running_var.repeat(b) + # self.weight = torch.ones_like(self.weight) + + if direct_weighting: + x_reshaped = x.contiguous().view(b * c) + if no_std: + out = x_reshaped + self.bias + else: + out = x_reshaped.mul(self.weight) + self.bias + out = out.view(b, c, *x.size()[2:]) + else: + x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:]) + out = F.batch_norm( + x_reshaped, running_mean, running_var, self.weight, self.bias, + True, self.momentum, self.eps) + out = out.view(b, c, *x.size()[2:]) + + # (nframe, bs, nfeats) <= (bs, nfeats, nframe) + out = out.permute(2,0,1) + return out + + def __repr__(self): + return self.__class__.__name__ + '(' + str(self.num_features) + ')' + +def assign_adain_params(adain_params, model): + # assign the adain_params to the AdaIN layers in model + for m in model.modules(): + if m.__class__.__name__ == "AdaptiveInstanceNorm1d": + mean = adain_params[: , : m.num_features] + std = adain_params[: , m.num_features: 2 * m.num_features] + m.bias = mean.contiguous().view(-1) + m.weight = std.contiguous().view(-1) + if adain_params.size(1) > 2 * m.num_features: + adain_params = adain_params[: , 2 * m.num_features:] + + +def get_num_adain_params(model): + # return the number of AdaIN parameters needed by the model + num_adain_params = 0 + for m in model.modules(): + if m.__class__.__name__ == "AdaptiveInstanceNorm1d": + num_adain_params += 2 * m.num_features + return num_adain_params diff --git a/Evaluator_272/mld/models/operator/blocks.py b/Evaluator_272/mld/models/operator/blocks.py new file mode 100644 index 0000000000000000000000000000000000000000..1b44085b27d1c3980e6e035867d3341d0f276368 --- /dev/null +++ b/Evaluator_272/mld/models/operator/blocks.py @@ -0,0 +1,146 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from mld.models.operator import AdaptiveInstanceNorm1d + + +class MLP(nn.Module): + + def __init__(self, cfg, out_dim, is_init): + super(MLP, self).__init__() + dims = cfg.MODEL.MOTION_DECODER.MLP_DIM + n_blk = len(dims) + norm = 'none' + acti = 'lrelu' + + layers = [] + for i in range(n_blk - 1): + layers += 
LinearBlock(dims[i], dims[i + 1], norm=norm, acti=acti) + layers += LinearBlock(dims[-1], out_dim, norm='none', acti='none') + self.model = nn.Sequential(*layers) + + if is_init: + for m in self.modules(): + if isinstance(m, nn.Linear): + #nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + nn.init.constant_(m.weight, 1) + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + def forward(self, x): + return self.model(x.view(x.size(0), -1)) + + +def ZeroPad1d(sizes): + return nn.ConstantPad1d(sizes, 0) + + +def get_acti_layer(acti='relu', inplace=True): + + if acti == 'relu': + return [nn.ReLU(inplace=inplace)] + elif acti == 'lrelu': + return [nn.LeakyReLU(0.2, inplace=inplace)] + elif acti == 'tanh': + return [nn.Tanh()] + elif acti == 'none': + return [] + else: + assert 0, "Unsupported activation: {}".format(acti) + + +def get_norm_layer(norm='none', norm_dim=None): + + if norm == 'bn': + return [nn.BatchNorm1d(norm_dim)] + elif norm == 'in': + # return [nn.InstanceNorm1d(norm_dim, affine=False)] # for rt42! + return [nn.InstanceNorm1d(norm_dim, affine=True)] + elif norm == 'adain': + return [AdaptiveInstanceNorm1d(norm_dim)] + elif norm == 'none': + return [] + else: + assert 0, "Unsupported normalization: {}".format(norm) + + +def get_dropout_layer(dropout=None): + if dropout is not None: + return [nn.Dropout(p=dropout)] + else: + return [] + + +def ConvLayers(kernel_size, + in_channels, + out_channels, + stride=1, + pad_type='reflect', + use_bias=True): + """ + returns a list of [pad, conv] => should be += to some list, then apply sequential + """ + + if pad_type == 'reflect': + pad = nn.ReflectionPad1d + elif pad_type == 'replicate': + pad = nn.ReplicationPad1d + elif pad_type == 'zero': + pad = ZeroPad1d + else: + assert 0, "Unsupported padding type: {}".format(pad_type) + + pad_l = (kernel_size - 1) // 2 + pad_r = kernel_size - 1 - pad_l + return [ + pad((pad_l, pad_r)), + nn.Conv1d(in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + bias=use_bias) + ] + + +def ConvBlock(kernel_size, + in_channels, + out_channels, + stride=1, + pad_type='reflect', + dropout=None, + norm='none', + acti='lrelu', + acti_first=False, + use_bias=True, + inplace=True): + """ + returns a list of [pad, conv, norm, acti] or [acti, pad, conv, norm] + """ + + layers = ConvLayers(kernel_size, + in_channels, + out_channels, + stride=stride, + pad_type=pad_type, + use_bias=use_bias) + layers += get_dropout_layer(dropout) + layers += get_norm_layer(norm, norm_dim=out_channels) + acti_layers = get_acti_layer(acti, inplace=inplace) + + if acti_first: + return acti_layers + layers + else: + return layers + acti_layers + + +def LinearBlock(in_dim, out_dim, dropout=None, norm='none', acti='relu'): + + use_bias = True + layers = [] + layers.append(nn.Linear(in_dim, out_dim, bias=use_bias)) + layers += get_dropout_layer(dropout) + layers += get_norm_layer(norm, norm_dim=out_dim) + layers += get_acti_layer(acti) + + return layers diff --git a/Evaluator_272/mld/models/operator/conv2d_gradfix.py b/Evaluator_272/mld/models/operator/conv2d_gradfix.py new file mode 100644 index 0000000000000000000000000000000000000000..64229c5a7fd04292140abac1f490619963009328 --- /dev/null +++ b/Evaluator_272/mld/models/operator/conv2d_gradfix.py @@ -0,0 +1,227 @@ +import contextlib +import warnings + +import torch +from torch import autograd +from torch.nn import functional as F + +enabled = True +weight_gradients_disabled = False + 
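+# (editor's note) `enabled` gates the custom autograd ops in this file, and
+# `weight_gradients_disabled` is flipped by no_weight_gradients() so Conv2d.backward can
+# skip the weight gradient. A minimal usage sketch (toy shapes, illustrative only; the
+# skip takes effect only on the CUDA gradfix path selected by could_use_op, i.e.
+# PyTorch 1.7/1.8 with cuDNN enabled):
+#   x = torch.randn(1, 3, 8, 8, device='cuda', requires_grad=True)
+#   w = torch.randn(4, 3, 3, 3, device='cuda', requires_grad=True)
+#   with no_weight_gradients():
+#       conv2d(x, w, padding=1).sum().backward()   # x.grad is filled, w.grad stays None
+#   # off that path the call falls back to F.conv2d and w.grad is computed as usual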
+ +@contextlib.contextmanager +def no_weight_gradients(): + global weight_gradients_disabled + + old = weight_gradients_disabled + weight_gradients_disabled = True + yield + weight_gradients_disabled = old + + +def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1): + if could_use_op(input): + return conv2d_gradfix( + transpose=False, + weight_shape=weight.shape, + stride=stride, + padding=padding, + output_padding=0, + dilation=dilation, + groups=groups, + ).apply(input, weight, bias) + + return F.conv2d( + input=input, + weight=weight, + bias=bias, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + ) + + +def conv_transpose2d( + input, + weight, + bias=None, + stride=1, + padding=0, + output_padding=0, + groups=1, + dilation=1, +): + if could_use_op(input): + return conv2d_gradfix( + transpose=True, + weight_shape=weight.shape, + stride=stride, + padding=padding, + output_padding=output_padding, + groups=groups, + dilation=dilation, + ).apply(input, weight, bias) + + return F.conv_transpose2d( + input=input, + weight=weight, + bias=bias, + stride=stride, + padding=padding, + output_padding=output_padding, + dilation=dilation, + groups=groups, + ) + + +def could_use_op(input): + if (not enabled) or (not torch.backends.cudnn.enabled): + return False + + if input.device.type != "cuda": + return False + + if any(torch.__version__.startswith(x) for x in ["1.7.", "1.8."]): + return True + + warnings.warn( + f"conv2d_gradfix not supported on PyTorch {torch.__version__}. Falling back to torch.nn.functional.conv2d()." + ) + + return False + + +def ensure_tuple(xs, ndim): + xs = tuple(xs) if isinstance(xs, (tuple, list)) else (xs,) * ndim + + return xs + + +conv2d_gradfix_cache = dict() + + +def conv2d_gradfix( + transpose, weight_shape, stride, padding, output_padding, dilation, groups +): + ndim = 2 + weight_shape = tuple(weight_shape) + stride = ensure_tuple(stride, ndim) + padding = ensure_tuple(padding, ndim) + output_padding = ensure_tuple(output_padding, ndim) + dilation = ensure_tuple(dilation, ndim) + + key = (transpose, weight_shape, stride, padding, output_padding, dilation, groups) + if key in conv2d_gradfix_cache: + return conv2d_gradfix_cache[key] + + common_kwargs = dict( + stride=stride, padding=padding, dilation=dilation, groups=groups + ) + + def calc_output_padding(input_shape, output_shape): + if transpose: + return [0, 0] + + return [ + input_shape[i + 2] + - (output_shape[i + 2] - 1) * stride[i] + - (1 - 2 * padding[i]) + - dilation[i] * (weight_shape[i + 2] - 1) + for i in range(ndim) + ] + + class Conv2d(autograd.Function): + @staticmethod + def forward(ctx, input, weight, bias): + if not transpose: + out = F.conv2d(input=input, weight=weight, bias=bias, **common_kwargs) + + else: + out = F.conv_transpose2d( + input=input, + weight=weight, + bias=bias, + output_padding=output_padding, + **common_kwargs, + ) + + ctx.save_for_backward(input, weight) + + return out + + @staticmethod + def backward(ctx, grad_output): + input, weight = ctx.saved_tensors + grad_input, grad_weight, grad_bias = None, None, None + + if ctx.needs_input_grad[0]: + p = calc_output_padding( + input_shape=input.shape, output_shape=grad_output.shape + ) + grad_input = conv2d_gradfix( + transpose=(not transpose), + weight_shape=weight_shape, + output_padding=p, + **common_kwargs, + ).apply(grad_output, weight, None) + + if ctx.needs_input_grad[1] and not weight_gradients_disabled: + grad_weight = Conv2dGradWeight.apply(grad_output, input) + + if 
ctx.needs_input_grad[2]: + grad_bias = grad_output.sum((0, 2, 3)) + + return grad_input, grad_weight, grad_bias + + class Conv2dGradWeight(autograd.Function): + @staticmethod + def forward(ctx, grad_output, input): + op = torch._C._jit_get_operation( + "aten::cudnn_convolution_backward_weight" + if not transpose + else "aten::cudnn_convolution_transpose_backward_weight" + ) + flags = [ + torch.backends.cudnn.benchmark, + torch.backends.cudnn.deterministic, + torch.backends.cudnn.allow_tf32, + ] + grad_weight = op( + weight_shape, + grad_output, + input, + padding, + stride, + dilation, + groups, + *flags, + ) + ctx.save_for_backward(grad_output, input) + + return grad_weight + + @staticmethod + def backward(ctx, grad_grad_weight): + grad_output, input = ctx.saved_tensors + grad_grad_output, grad_grad_input = None, None + + if ctx.needs_input_grad[0]: + grad_grad_output = Conv2d.apply(input, grad_grad_weight, None) + + if ctx.needs_input_grad[1]: + p = calc_output_padding( + input_shape=input.shape, output_shape=grad_output.shape + ) + grad_grad_input = conv2d_gradfix( + transpose=(not transpose), + weight_shape=weight_shape, + output_padding=p, + **common_kwargs, + ).apply(grad_output, grad_grad_weight, None) + + return grad_grad_output, grad_grad_input + + conv2d_gradfix_cache[key] = Conv2d + + return Conv2d diff --git a/Evaluator_272/mld/models/operator/cross_attention.py b/Evaluator_272/mld/models/operator/cross_attention.py new file mode 100644 index 0000000000000000000000000000000000000000..deb1f053e575bd0940d12a9cc526a44f689f24c0 --- /dev/null +++ b/Evaluator_272/mld/models/operator/cross_attention.py @@ -0,0 +1,412 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +DETR Transformer class. +Copy-paste from torch.nn.Transformer with modifications: + * positional encodings are passed in MHattention + * extra LN at the end of encoder is removed + * decoder returns a stack of activations from all decoding layers +""" +import copy +from typing import List, Optional +from numpy import block + +import torch +import torch.nn.functional as F +from torch import Tensor, nn + + +class SkipTransformerEncoder(nn.Module): + def __init__(self, encoder_layer, num_layers, norm=None): + super().__init__() + self.d_model = encoder_layer.d_model + + self.num_layers = num_layers + self.norm = norm + + assert num_layers % 2 == 1 + + num_block = (num_layers-1)//2 + self.input_blocks = _get_clones(encoder_layer, num_block) + self.middle_block = _get_clone(encoder_layer) + self.output_blocks = _get_clones(encoder_layer, num_block) + self.linear_blocks = _get_clones(nn.Linear(2*self.d_model, self.d_model), num_block) + + self._reset_parameters() + + def _reset_parameters(self): + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def forward(self, src, + mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None): + x = src + + xs = [] + for module in self.input_blocks: + x = module(x, src_mask=mask, + src_key_padding_mask=src_key_padding_mask, pos=pos) + xs.append(x) + + x = self.middle_block(x, src_mask=mask, + src_key_padding_mask=src_key_padding_mask, pos=pos) + + for (module, linear) in zip(self.output_blocks, self.linear_blocks): + x = torch.cat([x, xs.pop()], dim=-1) + x = linear(x) + x = module(x, src_mask=mask, + src_key_padding_mask=src_key_padding_mask, pos=pos) + + if self.norm is not None: + x = self.norm(x) + return x + +class SkipTransformerDecoder(nn.Module): + def __init__(self, 
decoder_layer, num_layers, norm=None): + super().__init__() + self.d_model = decoder_layer.d_model + + self.num_layers = num_layers + self.norm = norm + + assert num_layers % 2 == 1 + + num_block = (num_layers-1)//2 + self.input_blocks = _get_clones(decoder_layer, num_block) + self.middle_block = _get_clone(decoder_layer) + self.output_blocks = _get_clones(decoder_layer, num_block) + self.linear_blocks = _get_clones(nn.Linear(2*self.d_model, self.d_model), num_block) + + self._reset_parameters() + + def _reset_parameters(self): + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def forward(self, tgt, memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None): + x = tgt + + xs = [] + for module in self.input_blocks: + x = module(x, memory, tgt_mask=tgt_mask, + memory_mask=memory_mask, + tgt_key_padding_mask=tgt_key_padding_mask, + memory_key_padding_mask=memory_key_padding_mask, + pos=pos, query_pos=query_pos) + xs.append(x) + + x = self.middle_block(x, memory, tgt_mask=tgt_mask, + memory_mask=memory_mask, + tgt_key_padding_mask=tgt_key_padding_mask, + memory_key_padding_mask=memory_key_padding_mask, + pos=pos, query_pos=query_pos) + + for (module, linear) in zip(self.output_blocks, self.linear_blocks): + x = torch.cat([x, xs.pop()], dim=-1) + x = linear(x) + x = module(x, memory, tgt_mask=tgt_mask, + memory_mask=memory_mask, + tgt_key_padding_mask=tgt_key_padding_mask, + memory_key_padding_mask=memory_key_padding_mask, + pos=pos, query_pos=query_pos) + + if self.norm is not None: + x = self.norm(x) + + return x + +class Transformer(nn.Module): + + def __init__(self, d_model=512, nhead=8, num_encoder_layers=6, + num_decoder_layers=6, dim_feedforward=2048, dropout=0.1, + activation="relu", normalize_before=False, + return_intermediate_dec=False): + super().__init__() + + encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, + dropout, activation, normalize_before) + encoder_norm = nn.LayerNorm(d_model) if normalize_before else None + self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm) + + decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward, + dropout, activation, normalize_before) + decoder_norm = nn.LayerNorm(d_model) + self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm, + return_intermediate=return_intermediate_dec) + + self._reset_parameters() + + self.d_model = d_model + self.nhead = nhead + + def _reset_parameters(self): + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def forward(self, src, mask, query_embed, pos_embed): + # flatten NxCxHxW to HWxNxC + bs, c, h, w = src.shape + src = src.flatten(2).permute(2, 0, 1) + pos_embed = pos_embed.flatten(2).permute(2, 0, 1) + query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1) + mask = mask.flatten(1) + + tgt = torch.zeros_like(query_embed) + memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed) + hs = self.decoder(tgt, memory, memory_key_padding_mask=mask, + pos=pos_embed, query_pos=query_embed) + return hs.transpose(1, 2), memory.permute(1, 2, 0).view(bs, c, h, w) + + +class TransformerEncoder(nn.Module): + + def __init__(self, encoder_layer, num_layers, norm=None): + super().__init__() + self.layers = _get_clones(encoder_layer, num_layers) + self.num_layers = 
num_layers + self.norm = norm + + def forward(self, src, + mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None): + output = src + + for layer in self.layers: + output = layer(output, src_mask=mask, + src_key_padding_mask=src_key_padding_mask, pos=pos) + + if self.norm is not None: + output = self.norm(output) + + return output + + +class TransformerDecoder(nn.Module): + + def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False): + super().__init__() + self.layers = _get_clones(decoder_layer, num_layers) + self.num_layers = num_layers + self.norm = norm + self.return_intermediate = return_intermediate + + def forward(self, tgt, memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None): + output = tgt + + intermediate = [] + + for layer in self.layers: + output = layer(output, memory, tgt_mask=tgt_mask, + memory_mask=memory_mask, + tgt_key_padding_mask=tgt_key_padding_mask, + memory_key_padding_mask=memory_key_padding_mask, + pos=pos, query_pos=query_pos) + if self.return_intermediate: + intermediate.append(self.norm(output)) + + if self.norm is not None: + output = self.norm(output) + if self.return_intermediate: + intermediate.pop() + intermediate.append(output) + + if self.return_intermediate: + return torch.stack(intermediate) + + return output.unsqueeze(0) + + +class TransformerEncoderLayer(nn.Module): + + def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, + activation="relu", normalize_before=False): + super().__init__() + self.d_model = d_model + self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + # Implementation of Feedforward model + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.norm1 = nn.LayerNorm(d_model) + self.norm2 = nn.LayerNorm(d_model) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + + self.activation = _get_activation_fn(activation) + self.normalize_before = normalize_before + + def with_pos_embed(self, tensor, pos: Optional[Tensor]): + return tensor if pos is None else tensor + pos + + def forward_post(self, + src, + src_mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None): + q = k = self.with_pos_embed(src, pos) + src2 = self.self_attn(q, k, value=src, attn_mask=src_mask, + key_padding_mask=src_key_padding_mask)[0] + src = src + self.dropout1(src2) + src = self.norm1(src) + src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) + src = src + self.dropout2(src2) + src = self.norm2(src) + return src + + def forward_pre(self, src, + src_mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None): + src2 = self.norm1(src) + q = k = self.with_pos_embed(src2, pos) + src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask, + key_padding_mask=src_key_padding_mask)[0] + src = src + self.dropout1(src2) + src2 = self.norm2(src) + src2 = self.linear2(self.dropout(self.activation(self.linear1(src2)))) + src = src + self.dropout2(src2) + return src + + def forward(self, src, + src_mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None): + if 
self.normalize_before: + return self.forward_pre(src, src_mask, src_key_padding_mask, pos) + return self.forward_post(src, src_mask, src_key_padding_mask, pos) + + +class TransformerDecoderLayer(nn.Module): + + def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, + activation="relu", normalize_before=False): + super().__init__() + self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + # Implementation of Feedforward model + self.d_model = d_model + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.norm1 = nn.LayerNorm(d_model) + self.norm2 = nn.LayerNorm(d_model) + self.norm3 = nn.LayerNorm(d_model) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + self.dropout3 = nn.Dropout(dropout) + + self.activation = _get_activation_fn(activation) + self.normalize_before = normalize_before + + def with_pos_embed(self, tensor, pos: Optional[Tensor]): + return tensor if pos is None else tensor + pos + + def forward_post(self, tgt, memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None): + + q = k = self.with_pos_embed(tgt, query_pos) + tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask, + key_padding_mask=tgt_key_padding_mask)[0] + tgt = tgt + self.dropout1(tgt2) + tgt = self.norm1(tgt) + tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos), + key=self.with_pos_embed(memory, pos), + value=memory, attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask)[0] + tgt = tgt + self.dropout2(tgt2) + tgt = self.norm2(tgt) + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) + tgt = tgt + self.dropout3(tgt2) + tgt = self.norm3(tgt) + return tgt + + def forward_pre(self, tgt, memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None): + tgt2 = self.norm1(tgt) + q = k = self.with_pos_embed(tgt2, query_pos) + tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask, + key_padding_mask=tgt_key_padding_mask)[0] + tgt = tgt + self.dropout1(tgt2) + tgt2 = self.norm2(tgt) + tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos), + key=self.with_pos_embed(memory, pos), + value=memory, attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask)[0] + tgt = tgt + self.dropout2(tgt2) + tgt2 = self.norm3(tgt) + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) + tgt = tgt + self.dropout3(tgt2) + return tgt + + def forward(self, tgt, memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None): + if self.normalize_before: + return self.forward_pre(tgt, memory, tgt_mask, memory_mask, + tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos) + return self.forward_post(tgt, memory, tgt_mask, memory_mask, + tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos) + + +def _get_clone(module): + return 
copy.deepcopy(module) + +def _get_clones(module, N): + return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) + + +def build_transformer(args): + return Transformer( + d_model=args.hidden_dim, + dropout=args.dropout, + nhead=args.nheads, + dim_feedforward=args.dim_feedforward, + num_encoder_layers=args.enc_layers, + num_decoder_layers=args.dec_layers, + normalize_before=args.pre_norm, + return_intermediate_dec=True, + ) + + +def _get_activation_fn(activation): + """Return an activation function given a string""" + if activation == "relu": + return F.relu + if activation == "gelu": + return F.gelu + if activation == "glu": + return F.glu + raise RuntimeError(F"activation should be relu/gelu, not {activation}.") \ No newline at end of file diff --git a/Evaluator_272/mld/models/operator/position_encoding.py b/Evaluator_272/mld/models/operator/position_encoding.py new file mode 100644 index 0000000000000000000000000000000000000000..d0a2bf7030ed445f12761b581741145a3ad98072 --- /dev/null +++ b/Evaluator_272/mld/models/operator/position_encoding.py @@ -0,0 +1,185 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +Various positional encodings for the transformer. +""" +import math +from typing import List, Optional + +import numpy as np +import torch +from torch import Tensor, nn + +# from util.misc import NestedTensor + + +class NestedTensor(object): + + def __init__(self, tensors, mask: Optional[Tensor]): + self.tensors = tensors + self.mask = mask + + def to(self, device): + # type: (Device) -> NestedTensor # noqa + cast_tensor = self.tensors.to(device) + mask = self.mask + if mask is not None: + assert mask is not None + cast_mask = mask.to(device) + else: + cast_mask = None + return NestedTensor(cast_tensor, cast_mask) + + def decompose(self): + return self.tensors, self.mask + + def __repr__(self): + return str(self.tensors) + + +class PositionEmbeddingSine(nn.Module): + """ + This is a more standard version of the position embedding, very similar to the one + used by the Attention is all you need paper, generalized to work on images. + """ + + def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None): + super().__init__() + self.num_pos_feats = num_pos_feats + self.temperature = temperature + self.normalize = normalize + if scale is not None and normalize is False: + raise ValueError("normalize should be True if scale is passed") + if scale is None: + scale = 2 * math.pi + self.scale = scale + + def forward(self, tensor_list: NestedTensor): + x = tensor_list.tensors + mask = tensor_list.mask + assert mask is not None + not_mask = ~mask + y_embed = not_mask.cumsum(1, dtype=torch.float32) + x_embed = not_mask.cumsum(2, dtype=torch.float32) + if self.normalize: + eps = 1e-6 + y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale + x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale + + dim_t = torch.arange(self.num_pos_feats, + dtype=torch.float32, device=x.device) + dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats) + + pos_x = x_embed[:, :, :, None] / dim_t + pos_y = y_embed[:, :, :, None] / dim_t + pos_x = torch.stack( + (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) + pos_y = torch.stack( + (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) + pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + return pos + + +class PositionEmbeddingLearned(nn.Module): + """ + Absolute pos embedding, learned. 
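+
+    A shape sketch (illustrative; the mask of the input NestedTensor is
+    not used by this module, and H, W must not exceed the 50-entry
+    embedding tables):
+
+    >>> pe = PositionEmbeddingLearned(num_pos_feats=256)
+    >>> x = NestedTensor(torch.zeros(2, 3, 16, 16), None)
+    >>> pe(x).shape
+    torch.Size([2, 512, 16, 16])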
+ """ + + def __init__(self, num_pos_feats=256): + super().__init__() + self.row_embed = nn.Embedding(50, num_pos_feats) + self.col_embed = nn.Embedding(50, num_pos_feats) + self.reset_parameters() + + def reset_parameters(self): + nn.init.uniform_(self.row_embed.weight) + nn.init.uniform_(self.col_embed.weight) + + def forward(self, tensor_list: NestedTensor): + x = tensor_list.tensors + h, w = x.shape[-2:] + i = torch.arange(w, device=x.device) + j = torch.arange(h, device=x.device) + x_emb = self.col_embed(i) + y_emb = self.row_embed(j) + pos = torch.cat([ + x_emb.unsqueeze(0).repeat(h, 1, 1), + y_emb.unsqueeze(1).repeat(1, w, 1), + ], + dim=-1).permute(2, 0, 1).unsqueeze(0).repeat( + x.shape[0], 1, 1, 1) + return pos + + +class PositionEmbeddingSine1D(nn.Module): + + def __init__(self, d_model, max_len=500, batch_first=False): + super().__init__() + self.batch_first = batch_first + + pe = torch.zeros(max_len, d_model) + position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1) + div_term = torch.exp(torch.arange( + 0, d_model, 2).float() * (-np.log(10000.0) / d_model)) + pe[:, 0::2] = torch.sin(position * div_term) + pe[:, 1::2] = torch.cos(position * div_term) + pe = pe.unsqueeze(0).transpose(0, 1) + + self.register_buffer('pe', pe) + + def forward(self, x): + # not used in the final model + if self.batch_first: + pos = self.pe.permute(1, 0, 2)[:, :x.shape[1], :] + else: + pos = self.pe[:x.shape[0], :] + return pos + + +class PositionEmbeddingLearned1D(nn.Module): + + def __init__(self, d_model, max_len=500, batch_first=False): + super().__init__() + self.batch_first = batch_first + # self.dropout = nn.Dropout(p=dropout) + + self.pe = nn.Parameter(torch.zeros(max_len, 1, d_model)) + # self.pe = pe.unsqueeze(0).transpose(0, 1) + + self.reset_parameters() + + def reset_parameters(self): + nn.init.uniform_(self.pe) + + def forward(self, x): + # not used in the final model + if self.batch_first: + pos = self.pe.permute(1, 0, 2)[:, :x.shape[1], :] + else: + x = x + self.pe[:x.shape[0], :] + return x + # return self.dropout(x) + + +def build_position_encoding(N_steps, + position_embedding="sine", + embedding_dim="1D"): + # N_steps = hidden_dim // 2 + if embedding_dim == "1D": + if position_embedding in ('v2', 'sine'): + position_embedding = PositionEmbeddingSine1D(N_steps) + elif position_embedding in ('v3', 'learned'): + position_embedding = PositionEmbeddingLearned1D(N_steps) + else: + raise ValueError(f"not supported {position_embedding}") + elif embedding_dim == "2D": + if position_embedding in ('v2', 'sine'): + # TODO find a better way of exposing other arguments + position_embedding = PositionEmbeddingSine(N_steps, normalize=True) + elif position_embedding in ('v3', 'learned'): + position_embedding = PositionEmbeddingLearned(N_steps) + else: + raise ValueError(f"not supported {position_embedding}") + else: + raise ValueError(f"not supported {embedding_dim}") + + return position_embedding diff --git a/Evaluator_272/mld/models/operator/position_encoding_layer.py b/Evaluator_272/mld/models/operator/position_encoding_layer.py new file mode 100644 index 0000000000000000000000000000000000000000..699c860bf5d28c384390196b086d93552b2cff64 --- /dev/null +++ b/Evaluator_272/mld/models/operator/position_encoding_layer.py @@ -0,0 +1,30 @@ +import numpy as np +import torch +from torch import nn + + +class PositionalEncoding(nn.Module): + + def __init__(self, d_model, dropout=0.1, max_len=5000, batch_first=False): + super().__init__() + self.batch_first = batch_first + + self.dropout = 
nn.Dropout(p=dropout) + + pe = torch.zeros(max_len, d_model) + position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1) + div_term = torch.exp(torch.arange( + 0, d_model, 2).float() * (-np.log(10000.0) / d_model)) + pe[:, 0::2] = torch.sin(position * div_term) + pe[:, 1::2] = torch.cos(position * div_term) + pe = pe.unsqueeze(0).transpose(0, 1) + + self.register_buffer("pe", pe) + + def forward(self, x): + # not used in the final model + if self.batch_first: + x = x + self.pe.permute(1, 0, 2)[:, : x.shape[1], :] + else: + x = x + self.pe[: x.shape[0], :] + return self.dropout(x) diff --git a/Evaluator_272/mld/models/operator/self_attention.py b/Evaluator_272/mld/models/operator/self_attention.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Evaluator_272/mld/models/tools/__init__.py b/Evaluator_272/mld/models/tools/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Evaluator_272/mld/models/tools/hessian_penalty.py b/Evaluator_272/mld/models/tools/hessian_penalty.py new file mode 100644 index 0000000000000000000000000000000000000000..d5081cd7ba942a7c47ebce2dbcb50affa1007767 --- /dev/null +++ b/Evaluator_272/mld/models/tools/hessian_penalty.py @@ -0,0 +1,138 @@ +""" +## Adapted to work with our "batches" +Official PyTorch implementation of the Hessian Penalty regularization term from https://arxiv.org/pdf/2008.10599.pdf +Author: Bill Peebles +TensorFlow Implementation (GPU + Multi-Layer): hessian_penalty_tf.py +Simple Pure NumPy Implementation: hessian_penalty_np.py + +Simple use case where you want to apply the Hessian Penalty to the output of net w.r.t. net_input: +>>> from hessian_penalty_pytorch import hessian_penalty +>>> net = MyNeuralNet() +>>> net_input = sample_input() +>>> loss = hessian_penalty(net, z=net_input) # Compute hessian penalty of net's output w.r.t. net_input +>>> loss.backward() # Compute gradients w.r.t. net's parameters + +If your network takes multiple inputs, simply supply them to hessian_penalty as you do in the net's forward pass. In the +following example, we assume BigGAN.forward takes a second input argument "y". Note that we always take the Hessian +Penalty w.r.t. the z argument supplied to hessian_penalty: +>>> from hessian_penalty_pytorch import hessian_penalty +>>> net = BigGAN() +>>> z_input = sample_z_vector() +>>> class_label = sample_class_label() +>>> loss = hessian_penalty(net, z=net_input, y=class_label) +>>> loss.backward() +""" + +import torch + + +def hessian_penalty(G, batch, k=2, epsilon=0.1, reduction=torch.max, return_separately=False, G_z=None, **G_kwargs): + """ + Official PyTorch Hessian Penalty implementation. + + Note: If you want to regularize multiple network activations simultaneously, you need to + make sure the function G you pass to hessian_penalty returns a list of those activations when it's called with + G(z, **G_kwargs). Otherwise, if G returns a tensor the Hessian Penalty will only be computed for the final + output of G. 
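+
+    A minimal usage sketch for this batch-based adaptation (the toy
+    function below is illustrative, not part of the original API): the
+    second argument is a dict whose "x" entry holds the tensor the
+    penalty is taken with respect to.
+
+    >>> batch = {"x": torch.randn(8, 4)}
+    >>> G_toy = lambda b: b["x"] ** 2
+    >>> loss = hessian_penalty(G_toy, batch, k=2)  # scalar tensor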
+ + :param G: Function that maps input z to either a tensor or a list of tensors (activations) + :param z: Input to G that the Hessian Penalty will be computed with respect to + :param k: Number of Hessian directions to sample (must be >= 2) + :param epsilon: Amount to blur G before estimating Hessian (must be > 0) + :param reduction: Many-to-one function to reduce each pixel/neuron's individual hessian penalty into a final loss + :param return_separately: If False, hessian penalties for each activation output by G are automatically summed into + a final loss. If True, the hessian penalties for each layer will be returned in a list + instead. If G outputs a single tensor, setting this to True will produce a length-1 + list. + :param G_z: [Optional small speed-up] If you have already computed G(z, **G_kwargs) for the current training + iteration, then you can provide it here to reduce the number of forward passes of this method by 1 + :param G_kwargs: Additional inputs to G besides the z vector. For example, in BigGAN you + would pass the class label into this function via y= + + :return: A differentiable scalar (the hessian penalty), or a list of hessian penalties if return_separately is True + """ + if G_z is None: + G_z = G(batch, **G_kwargs) + z = batch["x"] + rademacher_size = torch.Size((k, *z.size())) # (k, N, z.size()) + dzs = epsilon * rademacher(rademacher_size, device=z.device) + second_orders = [] + for dz in dzs: # Iterate over each (N, z.size()) tensor in xs + central_second_order = multi_layer_second_directional_derivative(G, batch, dz, G_z, epsilon, **G_kwargs) + second_orders.append(central_second_order) # Appends a tensor with shape equal to G(z).size() + loss = multi_stack_var_and_reduce(second_orders, reduction, return_separately) # (k, G(z).size()) --> scalar + return loss + + +def rademacher(shape, device='cpu'): + """Creates a random tensor of size [shape] under the Rademacher distribution (P(x=1) == P(x=-1) == 0.5)""" + x = torch.empty(shape, device=device) + x.random_(0, 2) # Creates random tensor of 0s and 1s + x[x == 0] = -1 # Turn the 0s into -1s + return x + + +def multi_layer_second_directional_derivative(G, batch, dz, G_z, epsilon, **G_kwargs): + """Estimates the second directional derivative of G w.r.t. its input at z in the direction x""" + batch_plus = {**batch, "x": batch["x"] + dz} + batch_moins = {**batch, "x": batch["x"] - dz} + G_to_x = G(batch_plus, **G_kwargs) + G_from_x = G(batch_moins, **G_kwargs) + + G_to_x = listify(G_to_x) + G_from_x = listify(G_from_x) + G_z = listify(G_z) + + eps_sqr = epsilon ** 2 + sdd = [(G2x - 2 * G_z_base + Gfx) / eps_sqr for G2x, G_z_base, Gfx in zip(G_to_x, G_z, G_from_x)] + return sdd + + +def stack_var_and_reduce(list_of_activations, reduction=torch.max): + """Equation (5) from the paper.""" + second_orders = torch.stack(list_of_activations) # (k, N, C, H, W) + var_tensor = torch.var(second_orders, dim=0, unbiased=True) # (N, C, H, W) + penalty = reduction(var_tensor) # (1,) (scalar) + return penalty + + +def multi_stack_var_and_reduce(sdds, reduction=torch.max, return_separately=False): + """Iterate over all activations to be regularized, then apply Equation (5) to each.""" + sum_of_penalties = 0 if not return_separately else [] + for activ_n in zip(*sdds): + penalty = stack_var_and_reduce(activ_n, reduction) + sum_of_penalties += penalty if not return_separately else [penalty] + return sum_of_penalties + + +def listify(x): + """If x is already a list, do nothing. 
Otherwise, wrap x in a list."""
+    if isinstance(x, list):
+        return x
+    else:
+        return [x]
+
+
+def _test_hessian_penalty():
+    """
+    A simple multi-layer test to verify the implementation.
+    Function: G(z) = [z_0 * z_1, z_0**2 * z_1]
+    Ground Truth Hessian Penalty: [4, 16 * z_0**2]
+    """
+    batch_size = 10
+    nz = 2
+    z = torch.randn(batch_size, nz)
+    def reduction(x): return x.abs().mean()
+    def G(z): return [z[:, 0] * z[:, 1], (z[:, 0] ** 2) * z[:, 1]]
+    ground_truth = [4, reduction(16 * z[:, 0] ** 2).item()]
+    # In this simple example, we use k=100 to reduce variance, but when applied to neural networks
+    # you will probably want to use a small k (e.g., k=2) due to memory considerations.
+    # The adapted hessian_penalty above expects a batch dict with an "x" entry,
+    # so wrap z and G accordingly.
+    predicted = hessian_penalty(lambda b: G(b["x"]), {"x": z}, G_z=None, k=100,
+                                reduction=reduction, return_separately=True)
+    predicted = [p.item() for p in predicted]
+    print('Ground Truth: %s' % ground_truth)
+    print('Approximation: %s' % predicted)  # This should be close to ground_truth, but not exactly correct
+    print('Difference: %s' % [str(100 * abs(p - gt) / gt) + '%' for p, gt in zip(predicted, ground_truth)])
+
+
+if __name__ == '__main__':
+    _test_hessian_penalty()
diff --git a/Evaluator_272/mld/models/tools/tools.py b/Evaluator_272/mld/models/tools/tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..89ecab5616c1f0d46ed5bc9b348c5e6ad3ee603d
--- /dev/null
+++ b/Evaluator_272/mld/models/tools/tools.py
@@ -0,0 +1,37 @@
+import torch.nn as nn
+
+def remove_padding(tensors, lengths):
+    return [tensor[:tensor_length] for tensor, tensor_length in zip(tensors, lengths)]
+
+class AutoParams(nn.Module):
+    def __init__(self, **kargs):
+        # `needed_params` / `optional_params` may be absent on a subclass;
+        # only swallow that specific error rather than every exception.
+        try:
+            for param in self.needed_params:
+                if param in kargs:
+                    setattr(self, param, kargs[param])
+                else:
+                    raise ValueError(f"{param} is needed.")
+        except AttributeError:
+            pass
+
+        try:
+            for param, default in self.optional_params.items():
+                if param in kargs and kargs[param] is not None:
+                    setattr(self, param, kargs[param])
+                else:
+                    setattr(self, param, default)
+        except AttributeError:
+            pass
+        super().__init__()
+
+
+# taken from the joeynmt repo
+def freeze_params(module: nn.Module) -> None:
+    """
+    Freeze the parameters of this module,
+    i.e. do not update them during training
+
+    :param module: freeze parameters of this module
+    """
+    for _, p in module.named_parameters():
+        p.requires_grad = False
diff --git a/Evaluator_272/mld/tools/__init__.py b/Evaluator_272/mld/tools/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/Evaluator_272/mld/tools/geometry.py b/Evaluator_272/mld/tools/geometry.py
new file mode 100644
index 0000000000000000000000000000000000000000..e6eafa2e1f2459a0f6f5ad1280c71e6a9625549e
--- /dev/null
+++ b/Evaluator_272/mld/tools/geometry.py
@@ -0,0 +1,566 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
+# Check PYTORCH3D_LICENCE before use
+
+import functools
+from typing import Optional
+
+import torch
+import torch.nn.functional as F
+
+
+"""
+The transformation matrices returned from the functions in this file assume
+the points on which the transformation will be applied are column vectors.
+i.e. the R matrix is structured as
+
+    R = [
+            [Rxx, Rxy, Rxz],
+            [Ryx, Ryy, Ryz],
+            [Rzx, Rzy, Rzz],
+        ]  # (3, 3)
+
+This matrix can be applied to column vectors by post multiplication
+by the points e.g.
+ + points = [[0], [1], [2]] # (3 x 1) xyz coordinates of a point + transformed_points = R * points + +To apply the same matrix to points which are row vectors, the R matrix +can be transposed and pre multiplied by the points: + +e.g. + points = [[0, 1, 2]] # (1 x 3) xyz coordinates of a point + transformed_points = points * R.transpose(1, 0) +""" + + +# Added +def matrix_of_angles(cos, sin, inv=False, dim=2): + assert dim in [2, 3] + sin = -sin if inv else sin + if dim == 2: + row1 = torch.stack((cos, -sin), axis=-1) + row2 = torch.stack((sin, cos), axis=-1) + return torch.stack((row1, row2), axis=-2) + elif dim == 3: + row1 = torch.stack((cos, -sin, 0*cos), axis=-1) + row2 = torch.stack((sin, cos, 0*cos), axis=-1) + row3 = torch.stack((0*sin, 0*cos, 1+0*cos), axis=-1) + return torch.stack((row1, row2, row3),axis=-2) + + +def quaternion_to_matrix(quaternions): + """ + Convert rotations given as quaternions to rotation matrices. + + Args: + quaternions: quaternions with real part first, + as tensor of shape (..., 4). + + Returns: + Rotation matrices as tensor of shape (..., 3, 3). + """ + r, i, j, k = torch.unbind(quaternions, -1) + two_s = 2.0 / (quaternions * quaternions).sum(-1) + + o = torch.stack( + ( + 1 - two_s * (j * j + k * k), + two_s * (i * j - k * r), + two_s * (i * k + j * r), + two_s * (i * j + k * r), + 1 - two_s * (i * i + k * k), + two_s * (j * k - i * r), + two_s * (i * k - j * r), + two_s * (j * k + i * r), + 1 - two_s * (i * i + j * j), + ), + -1, + ) + return o.reshape(quaternions.shape[:-1] + (3, 3)) + + +def _copysign(a, b): + """ + Return a tensor where each element has the absolute value taken from the, + corresponding element of a, with sign taken from the corresponding + element of b. This is like the standard copysign floating-point operation, + but is not careful about negative 0 and NaN. + + Args: + a: source tensor. + b: tensor whose signs will be used, of the same shape as a. + + Returns: + Tensor of the same shape as a with the signs of b. + """ + signs_differ = (a < 0) != (b < 0) + return torch.where(signs_differ, -a, a) + + +def _sqrt_positive_part(x): + """ + Returns torch.sqrt(torch.max(0, x)) + but with a zero subgradient where x is 0. + """ + ret = torch.zeros_like(x) + positive_mask = x > 0 + ret[positive_mask] = torch.sqrt(x[positive_mask]) + return ret + + +def matrix_to_quaternion(matrix): + """ + Convert rotations given as rotation matrices to quaternions. + + Args: + matrix: Rotation matrices as tensor of shape (..., 3, 3). + + Returns: + quaternions with real part first, as tensor of shape (..., 4). + """ + if matrix.size(-1) != 3 or matrix.size(-2) != 3: + raise ValueError(f"Invalid rotation matrix shape f{matrix.shape}.") + m00 = matrix[..., 0, 0] + m11 = matrix[..., 1, 1] + m22 = matrix[..., 2, 2] + o0 = 0.5 * _sqrt_positive_part(1 + m00 + m11 + m22) + x = 0.5 * _sqrt_positive_part(1 + m00 - m11 - m22) + y = 0.5 * _sqrt_positive_part(1 - m00 + m11 - m22) + z = 0.5 * _sqrt_positive_part(1 - m00 - m11 + m22) + o1 = _copysign(x, matrix[..., 2, 1] - matrix[..., 1, 2]) + o2 = _copysign(y, matrix[..., 0, 2] - matrix[..., 2, 0]) + o3 = _copysign(z, matrix[..., 1, 0] - matrix[..., 0, 1]) + return torch.stack((o0, o1, o2, o3), -1) + + +def _axis_angle_rotation(axis: str, angle): + """ + Return the rotation matrices for one of the rotations about an axis + of which Euler angles describe, for each value of the angle given. + + Args: + axis: Axis label "X" or "Y or "Z". 
+ angle: any shape tensor of Euler angles in radians + + Returns: + Rotation matrices as tensor of shape (..., 3, 3). + """ + + cos = torch.cos(angle) + sin = torch.sin(angle) + one = torch.ones_like(angle) + zero = torch.zeros_like(angle) + + if axis == "X": + R_flat = (one, zero, zero, zero, cos, -sin, zero, sin, cos) + if axis == "Y": + R_flat = (cos, zero, sin, zero, one, zero, -sin, zero, cos) + if axis == "Z": + R_flat = (cos, -sin, zero, sin, cos, zero, zero, zero, one) + + return torch.stack(R_flat, -1).reshape(angle.shape + (3, 3)) + + +def euler_angles_to_matrix(euler_angles, convention: str): + """ + Convert rotations given as Euler angles in radians to rotation matrices. + + Args: + euler_angles: Euler angles in radians as tensor of shape (..., 3). + convention: Convention string of three uppercase letters from + {"X", "Y", and "Z"}. + + Returns: + Rotation matrices as tensor of shape (..., 3, 3). + """ + if euler_angles.dim() == 0 or euler_angles.shape[-1] != 3: + raise ValueError("Invalid input euler angles.") + if len(convention) != 3: + raise ValueError("Convention must have 3 letters.") + if convention[1] in (convention[0], convention[2]): + raise ValueError(f"Invalid convention {convention}.") + for letter in convention: + if letter not in ("X", "Y", "Z"): + raise ValueError(f"Invalid letter {letter} in convention string.") + matrices = map(_axis_angle_rotation, convention, torch.unbind(euler_angles, -1)) + return functools.reduce(torch.matmul, matrices) + + +def _angle_from_tan( + axis: str, other_axis: str, data, horizontal: bool, tait_bryan: bool +): + """ + Extract the first or third Euler angle from the two members of + the matrix which are positive constant times its sine and cosine. + + Args: + axis: Axis label "X" or "Y or "Z" for the angle we are finding. + other_axis: Axis label "X" or "Y or "Z" for the middle axis in the + convention. + data: Rotation matrices as tensor of shape (..., 3, 3). + horizontal: Whether we are looking for the angle for the third axis, + which means the relevant entries are in the same row of the + rotation matrix. If not, they are in the same column. + tait_bryan: Whether the first and third axes in the convention differ. + + Returns: + Euler Angles in radians for each matrix in data as a tensor + of shape (...). + """ + + i1, i2 = {"X": (2, 1), "Y": (0, 2), "Z": (1, 0)}[axis] + if horizontal: + i2, i1 = i1, i2 + even = (axis + other_axis) in ["XY", "YZ", "ZX"] + if horizontal == even: + return torch.atan2(data[..., i1], data[..., i2]) + if tait_bryan: + return torch.atan2(-data[..., i2], data[..., i1]) + return torch.atan2(data[..., i2], -data[..., i1]) + + +def _index_from_letter(letter: str): + if letter == "X": + return 0 + if letter == "Y": + return 1 + if letter == "Z": + return 2 + + +def matrix_to_euler_angles(matrix, convention: str): + """ + Convert rotations given as rotation matrices to Euler angles in radians. + + Args: + matrix: Rotation matrices as tensor of shape (..., 3, 3). + convention: Convention string of three uppercase letters. + + Returns: + Euler angles in radians as tensor of shape (..., 3). 
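+
+    A round-trip sanity check (values are illustrative):
+
+    >>> angles = torch.tensor([0.1, 0.2, 0.3])
+    >>> R = euler_angles_to_matrix(angles, "XYZ")
+    >>> torch.allclose(matrix_to_euler_angles(R, "XYZ"), angles, atol=1e-5)
+    True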
+ """ + if len(convention) != 3: + raise ValueError("Convention must have 3 letters.") + if convention[1] in (convention[0], convention[2]): + raise ValueError(f"Invalid convention {convention}.") + for letter in convention: + if letter not in ("X", "Y", "Z"): + raise ValueError(f"Invalid letter {letter} in convention string.") + if matrix.size(-1) != 3 or matrix.size(-2) != 3: + raise ValueError(f"Invalid rotation matrix shape f{matrix.shape}.") + i0 = _index_from_letter(convention[0]) + i2 = _index_from_letter(convention[2]) + tait_bryan = i0 != i2 + if tait_bryan: + central_angle = torch.asin( + matrix[..., i0, i2] * (-1.0 if i0 - i2 in [-1, 2] else 1.0) + ) + else: + central_angle = torch.acos(matrix[..., i0, i0]) + + o = ( + _angle_from_tan( + convention[0], convention[1], matrix[..., i2], False, tait_bryan + ), + central_angle, + _angle_from_tan( + convention[2], convention[1], matrix[..., i0, :], True, tait_bryan + ), + ) + return torch.stack(o, -1) + + +def random_quaternions( + n: int, dtype: Optional[torch.dtype] = None, device=None, requires_grad=False +): + """ + Generate random quaternions representing rotations, + i.e. versors with nonnegative real part. + + Args: + n: Number of quaternions in a batch to return. + dtype: Type to return. + device: Desired device of returned tensor. Default: + uses the current device for the default tensor type. + requires_grad: Whether the resulting tensor should have the gradient + flag set. + + Returns: + Quaternions as tensor of shape (N, 4). + """ + o = torch.randn((n, 4), dtype=dtype, device=device, requires_grad=requires_grad) + s = (o * o).sum(1) + o = o / _copysign(torch.sqrt(s), o[:, 0])[:, None] + return o + + +def random_rotations( + n: int, dtype: Optional[torch.dtype] = None, device=None, requires_grad=False +): + """ + Generate random rotations as 3x3 rotation matrices. + + Args: + n: Number of rotation matrices in a batch to return. + dtype: Type to return. + device: Device of returned tensor. Default: if None, + uses the current device for the default tensor type. + requires_grad: Whether the resulting tensor should have the gradient + flag set. + + Returns: + Rotation matrices as tensor of shape (n, 3, 3). + """ + quaternions = random_quaternions( + n, dtype=dtype, device=device, requires_grad=requires_grad + ) + return quaternion_to_matrix(quaternions) + + +def random_rotation( + dtype: Optional[torch.dtype] = None, device=None, requires_grad=False +): + """ + Generate a single random 3x3 rotation matrix. + + Args: + dtype: Type to return + device: Device of returned tensor. Default: if None, + uses the current device for the default tensor type + requires_grad: Whether the resulting tensor should have the gradient + flag set + + Returns: + Rotation matrix as tensor of shape (3, 3). + """ + return random_rotations(1, dtype, device, requires_grad)[0] + + +def standardize_quaternion(quaternions): + """ + Convert a unit quaternion to a standard form: one in which the real + part is non negative. + + Args: + quaternions: Quaternions with real part first, + as tensor of shape (..., 4). + + Returns: + Standardized quaternions as tensor of shape (..., 4). + """ + return torch.where(quaternions[..., 0:1] < 0, -quaternions, quaternions) + + +def quaternion_raw_multiply(a, b): + """ + Multiply two quaternions. + Usual torch rules for broadcasting apply. + + Args: + a: Quaternions as tensor of shape (..., 4), real part first. + b: Quaternions as tensor of shape (..., 4), real part first. 
+ + Returns: + The product of a and b, a tensor of quaternions shape (..., 4). + """ + aw, ax, ay, az = torch.unbind(a, -1) + bw, bx, by, bz = torch.unbind(b, -1) + ow = aw * bw - ax * bx - ay * by - az * bz + ox = aw * bx + ax * bw + ay * bz - az * by + oy = aw * by - ax * bz + ay * bw + az * bx + oz = aw * bz + ax * by - ay * bx + az * bw + return torch.stack((ow, ox, oy, oz), -1) + + +def quaternion_multiply(a, b): + """ + Multiply two quaternions representing rotations, returning the quaternion + representing their composition, i.e. the versor with nonnegative real part. + Usual torch rules for broadcasting apply. + + Args: + a: Quaternions as tensor of shape (..., 4), real part first. + b: Quaternions as tensor of shape (..., 4), real part first. + + Returns: + The product of a and b, a tensor of quaternions of shape (..., 4). + """ + ab = quaternion_raw_multiply(a, b) + return standardize_quaternion(ab) + + +def quaternion_invert(quaternion): + """ + Given a quaternion representing rotation, get the quaternion representing + its inverse. + + Args: + quaternion: Quaternions as tensor of shape (..., 4), with real part + first, which must be versors (unit quaternions). + + Returns: + The inverse, a tensor of quaternions of shape (..., 4). + """ + + return quaternion * quaternion.new_tensor([1, -1, -1, -1]) + + +def quaternion_apply(quaternion, point): + """ + Apply the rotation given by a quaternion to a 3D point. + Usual torch rules for broadcasting apply. + + Args: + quaternion: Tensor of quaternions, real part first, of shape (..., 4). + point: Tensor of 3D points of shape (..., 3). + + Returns: + Tensor of rotated points of shape (..., 3). + """ + if point.size(-1) != 3: + raise ValueError(f"Points are not in 3D, f{point.shape}.") + real_parts = point.new_zeros(point.shape[:-1] + (1,)) + point_as_quaternion = torch.cat((real_parts, point), -1) + out = quaternion_raw_multiply( + quaternion_raw_multiply(quaternion, point_as_quaternion), + quaternion_invert(quaternion), + ) + return out[..., 1:] + + +def axis_angle_to_matrix(axis_angle): + """ + Convert rotations given as axis/angle to rotation matrices. + + Args: + axis_angle: Rotations given as a vector in axis angle form, + as a tensor of shape (..., 3), where the magnitude is + the angle turned anticlockwise in radians around the + vector's direction. + + Returns: + Rotation matrices as tensor of shape (..., 3, 3). + """ + return quaternion_to_matrix(axis_angle_to_quaternion(axis_angle)) + + +def matrix_to_axis_angle(matrix): + """ + Convert rotations given as rotation matrices to axis/angle. + + Args: + matrix: Rotation matrices as tensor of shape (..., 3, 3). + + Returns: + Rotations given as a vector in axis angle form, as a tensor + of shape (..., 3), where the magnitude is the angle + turned anticlockwise in radians around the vector's + direction. + """ + return quaternion_to_axis_angle(matrix_to_quaternion(matrix)) + + +def axis_angle_to_quaternion(axis_angle): + """ + Convert rotations given as axis/angle to quaternions. + + Args: + axis_angle: Rotations given as a vector in axis angle form, + as a tensor of shape (..., 3), where the magnitude is + the angle turned anticlockwise in radians around the + vector's direction. + + Returns: + quaternions with real part first, as tensor of shape (..., 4). 
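+
+    A quick check against a known case (illustrative): a rotation of pi
+    radians about the X axis maps to the quaternion (0, 1, 0, 0) up to
+    float precision.
+
+    >>> aa = torch.tensor([[3.14159265, 0.0, 0.0]])
+    >>> torch.allclose(axis_angle_to_quaternion(aa),
+    ...                torch.tensor([[0.0, 1.0, 0.0, 0.0]]), atol=1e-6)
+    True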
+ """ + angles = torch.norm(axis_angle, p=2, dim=-1, keepdim=True) + half_angles = 0.5 * angles + eps = 1e-6 + small_angles = angles.abs() < eps + sin_half_angles_over_angles = torch.empty_like(angles) + sin_half_angles_over_angles[~small_angles] = ( + torch.sin(half_angles[~small_angles]) / angles[~small_angles] + ) + # for x small, sin(x/2) is about x/2 - (x/2)^3/6 + # so sin(x/2)/x is about 1/2 - (x*x)/48 + sin_half_angles_over_angles[small_angles] = ( + 0.5 - (angles[small_angles] * angles[small_angles]) / 48 + ) + quaternions = torch.cat( + [torch.cos(half_angles), axis_angle * sin_half_angles_over_angles], dim=-1 + ) + return quaternions + + +def quaternion_to_axis_angle(quaternions): + """ + Convert rotations given as quaternions to axis/angle. + + Args: + quaternions: quaternions with real part first, + as tensor of shape (..., 4). + + Returns: + Rotations given as a vector in axis angle form, as a tensor + of shape (..., 3), where the magnitude is the angle + turned anticlockwise in radians around the vector's + direction. + """ + norms = torch.norm(quaternions[..., 1:], p=2, dim=-1, keepdim=True) + half_angles = torch.atan2(norms, quaternions[..., :1]) + angles = 2 * half_angles + eps = 1e-6 + small_angles = angles.abs() < eps + sin_half_angles_over_angles = torch.empty_like(angles) + sin_half_angles_over_angles[~small_angles] = ( + torch.sin(half_angles[~small_angles]) / angles[~small_angles] + ) + # for x small, sin(x/2) is about x/2 - (x/2)^3/6 + # so sin(x/2)/x is about 1/2 - (x*x)/48 + sin_half_angles_over_angles[small_angles] = ( + 0.5 - (angles[small_angles] * angles[small_angles]) / 48 + ) + return quaternions[..., 1:] / sin_half_angles_over_angles + + +def rotation_6d_to_matrix(d6: torch.Tensor) -> torch.Tensor: + """ + Converts 6D rotation representation by Zhou et al. [1] to rotation matrix + using Gram--Schmidt orthogonalisation per Section B of [1]. + Args: + d6: 6D rotation representation, of size (*, 6) + + Returns: + batch of rotation matrices of size (*, 3, 3) + + [1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H. + On the Continuity of Rotation Representations in Neural Networks. + IEEE Conference on Computer Vision and Pattern Recognition, 2019. + Retrieved from http://arxiv.org/abs/1812.07035 + """ + + a1, a2 = d6[..., :3], d6[..., 3:] + b1 = F.normalize(a1, dim=-1) + b2 = a2 - (b1 * a2).sum(-1, keepdim=True) * b1 + b2 = F.normalize(b2, dim=-1) + b3 = torch.cross(b1, b2, dim=-1) + return torch.stack((b1, b2, b3), dim=-2) + + +def matrix_to_rotation_6d(matrix: torch.Tensor) -> torch.Tensor: + """ + Converts rotation matrices to 6D rotation representation by Zhou et al. [1] + by dropping the last row. Note that 6D representation is not unique. + Args: + matrix: batch of rotation matrices of size (*, 3, 3) + + Returns: + 6D rotation representation, of size (*, 6) + + [1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H. + On the Continuity of Rotation Representations in Neural Networks. + IEEE Conference on Computer Vision and Pattern Recognition, 2019. 
+ Retrieved from http://arxiv.org/abs/1812.07035 + """ + return matrix[..., :2, :].clone().reshape(*matrix.size()[:-2], 6) diff --git a/Evaluator_272/mld/tools/logging.py b/Evaluator_272/mld/tools/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..216e521a1d203b8dc6f436dc91a6a6631419bb65 --- /dev/null +++ b/Evaluator_272/mld/tools/logging.py @@ -0,0 +1,40 @@ +import logging +import tqdm + + +class LevelsFilter(logging.Filter): + def __init__(self, levels): + self.levels = [getattr(logging, level) for level in levels] + + def filter(self, record): + return record.levelno in self.levels + + +class StreamToLogger(object): + """ + Fake file-like stream object that redirects writes to a logger instance. + """ + def __init__(self, logger, level): + self.logger = logger + self.level = level + self.linebuf = '' + + def write(self, buf): + for line in buf.rstrip().splitlines(): + self.logger.log(self.level, line.rstrip()) + + def flush(self): + pass + + +class TqdmLoggingHandler(logging.Handler): + def __init__(self, level=logging.NOTSET): + super().__init__(level) + + def emit(self, record): + try: + msg = self.format(record) + tqdm.tqdm.write(msg) + self.flush() + except Exception: + self.handleError(record) diff --git a/Evaluator_272/mld/tools/runid.py b/Evaluator_272/mld/tools/runid.py new file mode 100644 index 0000000000000000000000000000000000000000..619e7696481eb3f91a0133fda4bce947cc853580 --- /dev/null +++ b/Evaluator_272/mld/tools/runid.py @@ -0,0 +1,13 @@ +# +""" +runid util. +Taken from wandb.sdk.lib.runid +""" + +import shortuuid # type: ignore + + +def generate_id() -> str: + # ~3t run ids (36**8) + run_gen = shortuuid.ShortUUID(alphabet=list("0123456789abcdefghijklmnopqrstuvwxyz")) + return run_gen.random(8) \ No newline at end of file diff --git a/Evaluator_272/mld/transforms/__init__.py b/Evaluator_272/mld/transforms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c7c0ab9179fd0ed93c98ce0e9c90d75484594faa --- /dev/null +++ b/Evaluator_272/mld/transforms/__init__.py @@ -0,0 +1,3 @@ +from .base import Transform +from .smpl import SMPLTransform +# from .xyz import XYZTransform diff --git a/Evaluator_272/mld/transforms/base.py b/Evaluator_272/mld/transforms/base.py new file mode 100644 index 0000000000000000000000000000000000000000..2685e2b3fba90e1400c87903e244eae617d99e8f --- /dev/null +++ b/Evaluator_272/mld/transforms/base.py @@ -0,0 +1,68 @@ +from dataclasses import dataclass, fields + + +class Transform: + + def collate(self, lst_datastruct): + from mld.datasets.utils import collate_tensor_with_padding + example = lst_datastruct[0] + + def collate_or_none(key): + if example[key] is None: + return None + key_lst = [x[key] for x in lst_datastruct] + return collate_tensor_with_padding(key_lst) + + kwargs = {key: collate_or_none(key) for key in example.datakeys} + + return self.Datastruct(**kwargs) + + +# Inspired from SMPLX library +# need to define "datakeys" and transforms +@dataclass +class Datastruct: + + def __getitem__(self, key): + return getattr(self, key) + + def __setitem__(self, key, value): + self.__dict__[key] = value + + def get(self, key, default=None): + return getattr(self, key, default) + + def __iter__(self): + return self.keys() + + def keys(self): + keys = [t.name for t in fields(self)] + return iter(keys) + + def values(self): + values = [getattr(self, t.name) for t in fields(self)] + return iter(values) + + def items(self): + data = [(t.name, getattr(self, t.name)) for t in fields(self)] + return 
iter(data)
+
+    def to(self, *args, **kwargs):
+        for key in self.datakeys:
+            if self[key] is not None:
+                self[key] = self[key].to(*args, **kwargs)
+        return self
+
+    @property
+    def device(self):
+        return self[self.datakeys[0]].device
+
+    def detach(self):
+
+        def detach_or_none(tensor):
+            if tensor is not None:
+                return tensor.detach()
+            return None
+
+        kwargs = {key: detach_or_none(self[key]) for key in self.datakeys}
+        return self.transforms.Datastruct(**kwargs)
diff --git a/Evaluator_272/mld/transforms/feats2smpl.py b/Evaluator_272/mld/transforms/feats2smpl.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3c8a5d9bfb844359e5910f3b9611c26190f70dc
--- /dev/null
+++ b/Evaluator_272/mld/transforms/feats2smpl.py
@@ -0,0 +1,35 @@
+from os.path import join as pjoin
+
+import numpy as np
+import torch
+
+import mld.data.humanml.utils.paramUtil as paramUtil
+from mld.data.humanml.data.dataset import Text2MotionDatasetV2
+from mld.data.humanml.scripts.motion_process import recover_from_ric
+from mld.data.humanml.utils.plot_script import plot_3d_motion
+
+skeleton = paramUtil.t2m_kinematic_chain
+
+
+def main():
+    data_root = '../datasets/humanml3d'
+    features_path = 'in.npy'
+    animation_save_path = 'in.mp4'
+
+    fps = 20
+    mean = np.load(pjoin(data_root, 'Mean.npy'))
+    std = np.load(pjoin(data_root, 'Std.npy'))
+
+    motion = np.load(features_path)
+    motion = motion * std + mean
+    motion_rec = recover_from_ric(torch.tensor(motion), 22).cpu().numpy()
+    # with open('in_22.npy', 'wb') as f:
+    #     np.save(f, motion_rec)
+    motion_rec = motion_rec * 1.3
+    plot_3d_motion(animation_save_path, motion_rec, title='input', fps=fps)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/Evaluator_272/mld/transforms/identity.py b/Evaluator_272/mld/transforms/identity.py
new file mode 100644
index 0000000000000000000000000000000000000000..d1e5540c8da75f6f839cd0e12672906273276dcc
--- /dev/null
+++ b/Evaluator_272/mld/transforms/identity.py
@@ -0,0 +1,28 @@
+from typing import Optional
+from torch import Tensor
+
+from .base import Datastruct, dataclass, Transform
+
+
+class IdentityTransform(Transform):
+    def __init__(self, **kwargs):
+        return
+
+    def Datastruct(self, **kwargs):
+        return IdentityDatastruct(**kwargs)
+
+    def __repr__(self):
+        return "IdentityTransform()"
+
+
+@dataclass
+class IdentityDatastruct(Datastruct):
+    transforms: IdentityTransform
+
+    features: Optional[Tensor] = None
+
+    def __post_init__(self):
+        self.datakeys = ["features"]
+
+    def __len__(self):
+        # `features` is the only datakey; `rfeats` does not exist here.
+        return len(self.features)
diff --git a/Evaluator_272/mld/transforms/joints2jfeats/__init__.py b/Evaluator_272/mld/transforms/joints2jfeats/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..0a924e845912842ec042b5b3195b8da7aee3f252
--- /dev/null
+++ b/Evaluator_272/mld/transforms/joints2jfeats/__init__.py
@@ -0,0 +1,2 @@
+from .base import Joints2Jfeats
+from .rifke import Rifke
diff --git a/Evaluator_272/mld/transforms/joints2jfeats/base.py b/Evaluator_272/mld/transforms/joints2jfeats/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..a61a3b42848f5c5d7f803cc700d1b08bb4ecdbc5
--- /dev/null
+++ b/Evaluator_272/mld/transforms/joints2jfeats/base.py
@@ -0,0 +1,34 @@
+from typing import Optional
+
+import torch
+from torch import Tensor, nn
+from pathlib import Path
+
+
+class Joints2Jfeats(nn.Module):
+    def __init__(self, path: Optional[str] = None,
+                 normalization: bool = False,
+                 eps: float = 1e-12,
+                 **kwargs) -> None:
+        if normalization and path is None:
TypeError("You should provide a path if normalization is on.") + + super().__init__() + self.normalization = normalization + self.eps = eps + + if normalization: + mean_path = Path(path) / "jfeats_mean.pt" + std_path = Path(path) / "jfeats_std.pt" + self.register_buffer('mean', torch.load(mean_path)) + self.register_buffer('std', torch.load(std_path)) + + def normalize(self, features: Tensor) -> Tensor: + if self.normalization: + features = (features - self.mean)/(self.std + self.eps) + return features + + def unnormalize(self, features: Tensor) -> Tensor: + if self.normalization: + features = features * self.std + self.mean + return features diff --git a/Evaluator_272/mld/transforms/joints2jfeats/rifke.py b/Evaluator_272/mld/transforms/joints2jfeats/rifke.py new file mode 100644 index 0000000000000000000000000000000000000000..db97bd8338abe9c527c1c23a4ca5c5ea738867b1 --- /dev/null +++ b/Evaluator_272/mld/transforms/joints2jfeats/rifke.py @@ -0,0 +1,142 @@ +from typing import Optional + +import torch +from einops import rearrange +from torch import Tensor +from mld.utils.geometry import matrix_of_angles +from .base import Joints2Jfeats +from .tools import get_forward_direction, get_floor, gaussian_filter1d, T # noqa + + +class Rifke(Joints2Jfeats): + + def __init__(self, + jointstype: str = "mmm", + path: Optional[str] = None, + normalization: bool = False, + forward_filter: bool = False, + **kwargs) -> None: + if jointstype not in ["mmm", "mmmns", 'humanml3d', "motionx", "motionx_v26"]: + print("This function assume that the root is the first index") + raise NotImplementedError("This jointstype is not implemented.") + + super().__init__(path=path, normalization=normalization) + self.jointstype = jointstype + self.forward_filter = forward_filter + + def forward(self, joints: Tensor) -> Tensor: + # Joints to rotation invariant poses (Holden et. al.) 
+        # Similar to the fke2rifke function in the Language2Pose repository,
+        # adapted to pytorch.
+        # Put the origin at the root joint instead of its ground projection.
+
+        poses = joints.clone()
+        poses[..., 1] -= get_floor(poses, jointstype=self.jointstype)
+
+        translation = poses[..., 0, :].clone()
+        # Let the root have the Y translation
+        root_y = translation[..., 1]
+
+        # Trajectory => Translation without gravity axis (Y)
+        trajectory = translation[..., [0, 2]]
+
+        # Delete the root joints of the poses
+        poses = poses[..., 1:, :]
+
+        # Remove the trajectory of the poses
+        poses[..., [0, 2]] -= trajectory[..., None, :]
+
+        # Compute the velocity of the trajectory
+        vel_trajectory = torch.diff(trajectory, dim=-2)
+        # 0 for the first one => keep the dimensionality
+        vel_trajectory = torch.cat(
+            (0 * vel_trajectory[..., [0], :], vel_trajectory), dim=-2)
+
+        # Compute the forward direction
+        forward = get_forward_direction(poses, jointstype=self.jointstype)
+        if self.forward_filter:
+            # Smoothing to remove high frequencies
+            forward = gaussian_filter1d(forward, 2)
+            # normalize again to get real directions
+            forward = torch.nn.functional.normalize(forward, dim=-1)
+
+        angles = T(torch.atan2(*T(forward)))
+        vel_angles = torch.diff(angles, dim=-1)
+        # 0 for the first one => keep the dimensionality
+        vel_angles = torch.cat((0 * vel_angles[..., [0]], vel_angles), dim=-1)
+
+        # Construct the inverse rotation matrix
+        sin, cos = forward[..., 0], forward[..., 1]
+        rotations_inv = matrix_of_angles(cos, sin, inv=True)
+
+        # Rotate the poses
+        poses_local = torch.einsum("...lj,...jk->...lk", poses[..., [0, 2]],
+                                   rotations_inv)
+        poses_local = torch.stack(
+            (poses_local[..., 0], poses[..., 1], poses_local[..., 1]), axis=-1)
+
+        # stack the xyz joints into feature vectors
+        poses_features = rearrange(poses_local,
+                                   "... joints xyz -> ... (joints xyz)")
+
+        # Rotate the vel_trajectory
+        vel_trajectory_local = torch.einsum("...j,...jk->...k", vel_trajectory,
+                                            rotations_inv)
+        # Stack things together
+        features = torch.cat((root_y[..., None], poses_features,
+                              vel_angles[..., None], vel_trajectory_local), -1)
+
+        # Normalize if needed
+        features = self.normalize(features)
+        return features
+
+    def inverse(self, features: Tensor) -> Tensor:
+        features = self.unnormalize(features)
+        root_y, poses_features, vel_angles, vel_trajectory_local = self.extract(
+            features)
+
+        # already have the right dimensionality
+        angles = torch.cumsum(vel_angles, dim=-1)
+        # First frame should be 0, but if inferred it is better to ensure it
+        angles = angles - angles[..., [0]]
+
+        cos, sin = torch.cos(angles), torch.sin(angles)
+        rotations = matrix_of_angles(cos, sin, inv=False)
+
+        # Get back the poses
+        poses_local = rearrange(poses_features,
+                                "... (joints xyz) -> ... joints xyz",
+                                xyz=3)
joints xyz", + xyz=3) + + # Rotate the poses + poses = torch.einsum("...lj,...jk->...lk", poses_local[..., [0, 2]], + rotations) + poses = torch.stack( + (poses[..., 0], poses_local[..., 1], poses[..., 1]), axis=-1) + + # Rotate the vel_trajectory + vel_trajectory = torch.einsum("...j,...jk->...k", vel_trajectory_local, + rotations) + # Integrate the trajectory + # Already have the good dimensionality + trajectory = torch.cumsum(vel_trajectory, dim=-2) + # First frame should be 0, but if infered it is better to ensure it + trajectory = trajectory - trajectory[..., [0], :] + + # Add the root joints (which is still zero) + poses = torch.cat((0 * poses[..., [0], :], poses), -2) + + # put back the root joint y + poses[..., 0, 1] = root_y + + # Add the trajectory globally + poses[..., [0, 2]] += trajectory[..., None, :] + return poses + + def extract(self, features: Tensor) -> tuple: + root_y = features[..., 0] + poses_features = features[..., 1:-3] + vel_angles = features[..., -3] + vel_trajectory_local = features[..., -2:] + + return root_y, poses_features, vel_angles, vel_trajectory_local diff --git a/Evaluator_272/mld/transforms/joints2jfeats/tools.py b/Evaluator_272/mld/transforms/joints2jfeats/tools.py new file mode 100644 index 0000000000000000000000000000000000000000..a2ad8eceb937ef841d38f1183aabb856793a377c --- /dev/null +++ b/Evaluator_272/mld/transforms/joints2jfeats/tools.py @@ -0,0 +1,92 @@ +import torch +import torch.nn.functional as F + +from mld.utils.joints import mmm_joints, humanml3d_joints, motionx_joints + +# Get the indexes of particular body part + + +# .T is deprecated now for reversing a tensor +def T(x): + return x.permute(*torch.arange(x.ndim - 1, -1, -1)) + + +def get_forward_direction(poses, jointstype="mmm"): + if jointstype == "mmm" or jointstype == "mmmns": + joints = mmm_joints + elif jointstype == "humanml3d": + joints = humanml3d_joints + elif jointstype in ["motionx", "motionx_v26"]: + joints = motionx_joints + else: + raise TypeError('Only supports mmm, mmmns and humanl3d jointstype') + # Shoulders + LS, RS = joints.index("LS"), joints.index("RS") + # Hips + LH, RH = joints.index("LH"), joints.index("RH") + + across = poses[..., RH, :] - poses[..., LH, :] + poses[..., RS, :] - poses[ + ..., LS, :] + forward = torch.stack((-across[..., 2], across[..., 0]), axis=-1) + forward = torch.nn.functional.normalize(forward, dim=-1) + return forward + + +def get_floor(poses, jointstype="mmm"): + if jointstype == "mmm" or jointstype == "mmmns": + joints = mmm_joints + elif jointstype == "humanml3d": + joints = humanml3d_joints + elif jointstype in ["motionx", "motionx_v26"]: + joints = motionx_joints + else: + raise TypeError('Only supports mmm, mmmns and humanl3d jointstype') + ndim = len(poses.shape) + # Feet + LM, RM = joints.index("LMrot"), joints.index("RMrot") + LF, RF = joints.index("LF"), joints.index("RF") + # import pdb; pdb.set_trace() + foot_heights = poses[..., (LM, LF, RM, RF), 1].min(-1).values + floor_height = softmin(foot_heights, softness=0.5, dim=-1) + return T(floor_height[(ndim - 2) * [None]]) + + +def softmax(x, softness=1.0, dim=None): + maxi, mini = x.max(dim=dim).values, x.min(dim=dim).values + return maxi + torch.log(softness + torch.exp(mini - maxi)) + + +def softmin(x, softness=1.0, dim=0): + return -softmax(-x, softness=softness, dim=dim) + + +def gaussian_filter1d(_inputs, sigma, truncate=4.0): + # Code adapted/mixed from scipy library into pytorch + # 
https://github.com/scipy/scipy/blob/47bb6febaa10658c72962b9615d5d5aa2513fa3a/scipy/ndimage/filters.py#L211 + # and gaussian kernel + # https://github.com/scipy/scipy/blob/47bb6febaa10658c72962b9615d5d5aa2513fa3a/scipy/ndimage/filters.py#L179 + # Correspond to mode="nearest" and order = 0 + # But works batched + if len(_inputs.shape) == 2: + inputs = _inputs[None] + else: + inputs = _inputs + + sd = float(sigma) + radius = int(truncate * sd + 0.5) + sigma2 = sigma * sigma + x = torch.arange(-radius, + radius + 1, + device=inputs.device, + dtype=inputs.dtype) + phi_x = torch.exp(-0.5 / sigma2 * x**2) + phi_x = phi_x / phi_x.sum() + + # Conv1d weights + groups = inputs.shape[-1] + weights = torch.tile(phi_x, (groups, 1, 1)) + inputs = inputs.transpose(-1, -2) + outputs = F.conv1d(inputs, weights, padding="same", + groups=groups).transpose(-1, -2) + + return outputs.reshape(_inputs.shape) diff --git a/Evaluator_272/mld/transforms/joints2rots/config.py b/Evaluator_272/mld/transforms/joints2rots/config.py new file mode 100644 index 0000000000000000000000000000000000000000..91e3a646f456e0a78ee9ff177ff87c941f9c01ba --- /dev/null +++ b/Evaluator_272/mld/transforms/joints2rots/config.py @@ -0,0 +1,119 @@ +import numpy as np +from mld.utils.joints import mmm_joints, smplh2mmm_indexes + +# Map joints Name to SMPL joints idx +JOINT_MAP = { + 'MidHip': 0, + 'LHip': 1, + 'LKnee': 4, + 'LAnkle': 7, + 'LFoot': 10, + 'RHip': 2, + 'RKnee': 5, + 'RAnkle': 8, + 'RFoot': 11, + 'LShoulder': 16, + 'LElbow': 18, + 'LWrist': 20, + 'LHand': 22, + 'RShoulder': 17, + 'RElbow': 19, + 'RWrist': 21, + 'RHand': 23, + 'spine1': 3, + 'spine2': 6, + 'spine3': 9, + 'Neck': 12, + 'Head': 15, + 'LCollar': 13, + 'Rcollar': 14, + 'Nose': 24, + 'REye': 26, + 'LEye': 26, + 'REar': 27, + 'LEar': 28, + 'LHeel': 31, + 'RHeel': 34, + 'OP RShoulder': 17, + 'OP LShoulder': 16, + 'OP RHip': 2, + 'OP LHip': 1, + 'OP Neck': 12, +} + +mmm2smpl_correspondence = { + "root": "MidHip", + "BP": "spine1", + "BT": "spine3", + "BLN": "Neck", + "BUN": "Head", + "LS": "LShoulder", + "LE": "LElbow", + "LW": "LWrist", + "RS": "RShoulder", + "RE": "RElbow", + "RW": "RWrist", + "LH": "LHip", + "LK": "LKnee", + "LA": "LAnkle", + "LMrot": "LHeel", + "LF": "LFoot", + "RH": "RHip", + "RK": "RKnee", + "RA": "RAnkle", + "RMrot": "RHeel", + "RF": "RFoot" +} + +full_smpl_idx = range(24) +key_smpl_idx = [0, 1, 4, 7, 2, 5, 8, 17, 19, 21, 16, 18, 20] + +AMASS_JOINT_MAP = { + 'MidHip': 0, + 'LHip': 1, + 'LKnee': 4, + 'LAnkle': 7, + 'LFoot': 10, + 'RHip': 2, + 'RKnee': 5, + 'RAnkle': 8, + 'RFoot': 11, + 'LShoulder': 16, + 'LElbow': 18, + 'LWrist': 20, + 'RShoulder': 17, + 'RElbow': 19, + 'RWrist': 21, + 'spine1': 3, + 'spine2': 6, + 'spine3': 9, + 'Neck': 12, + 'Head': 15, + 'LCollar': 13, + 'Rcollar': 14, +} +amass_idx = range(22) +amass_smpl_idx = range(22) + +# cal mmm in smpl index +smpl2mmm_correspondence = { + val: key + for key, val in mmm2smpl_correspondence.items() +} +smpl2mmm_indexes = [JOINT_MAP[mmm2smpl_correspondence[x]] for x in mmm_joints] + +# cal mmm joints map +MMM_JOINT_MAP = { + val: JOINT_MAP[val] + for key, val in mmm2smpl_correspondence.items() +} + +# mmm_idx = range(21) +# mmm_smpl_dix = smpl2mmm_indexes +# mmm_smpl_dix = smplh2mmm_indexes +# todo - configable +SMPL_MODEL_DIR = "/apdcephfs/share_1227775/shingxchen/AIMotion/TMOSTData/deps/smpl_models/" +GMM_MODEL_DIR = "/apdcephfs/share_1227775/shingxchen/AIMotion/TMOSTData/deps/smpl_models/" +SMPL_MEAN_FILE = 
"/apdcephfs/share_1227775/shingxchen/AIMotion/TMOSTData/deps/smpl_models/neutral_smpl_mean_params.h5" +# for collsion +Part_Seg_DIR = "/apdcephfs/share_1227775/shingxchen/AIMotion/TMOSTData/deps/smpl_models/smplx_parts_segm.pkl" diff --git a/Evaluator_272/mld/transforms/joints2rots/customloss.py b/Evaluator_272/mld/transforms/joints2rots/customloss.py new file mode 100644 index 0000000000000000000000000000000000000000..2c3c3a530876113596f223324dc9dd0c002fd520 --- /dev/null +++ b/Evaluator_272/mld/transforms/joints2rots/customloss.py @@ -0,0 +1,217 @@ +import torch +import torch.nn.functional as F +import config + +# Guassian +def gmof(x, sigma): + """ + Geman-McClure error function + """ + x_squared = x ** 2 + sigma_squared = sigma ** 2 + return (sigma_squared * x_squared) / (sigma_squared + x_squared) + +# angle prior +def angle_prior(pose): + """ + Angle prior that penalizes unnatural bending of the knees and elbows + """ + # We subtract 3 because pose does not include the global rotation of the model + return torch.exp( + pose[:, [55 - 3, 58 - 3, 12 - 3, 15 - 3]] * torch.tensor([1., -1., -1, -1.], device=pose.device)) ** 2 + + +def perspective_projection(points, rotation, translation, + focal_length, camera_center): + """ + This function computes the perspective projection of a set of points. + Input: + points (bs, N, 3): 3D points + rotation (bs, 3, 3): Camera rotation + translation (bs, 3): Camera translation + focal_length (bs,) or scalar: Focal length + camera_center (bs, 2): Camera center + """ + batch_size = points.shape[0] + K = torch.zeros([batch_size, 3, 3], device=points.device) + K[:, 0, 0] = focal_length + K[:, 1, 1] = focal_length + K[:, 2, 2] = 1. + K[:, :-1, -1] = camera_center + + # Transform points + points = torch.einsum('bij,bkj->bki', rotation, points) + points = points + translation.unsqueeze(1) + + # Apply perspective distortion + projected_points = points / points[:, :, -1].unsqueeze(-1) + + # Apply camera intrinsics + projected_points = torch.einsum('bij,bkj->bki', K, projected_points) + + return projected_points[:, :, :-1] + + +def body_fitting_loss(body_pose, betas, model_joints, camera_t, camera_center, + joints_2d, joints_conf, pose_prior, + focal_length=5000, sigma=100, pose_prior_weight=4.78, + shape_prior_weight=5, angle_prior_weight=15.2, + output='sum'): + """ + Loss function for body fitting + """ + batch_size = body_pose.shape[0] + rotation = torch.eye(3, device=body_pose.device).unsqueeze(0).expand(batch_size, -1, -1) + + projected_joints = perspective_projection(model_joints, rotation, camera_t, + focal_length, camera_center) + + # Weighted robust reprojection error + reprojection_error = gmof(projected_joints - joints_2d, sigma) + reprojection_loss = (joints_conf ** 2) * reprojection_error.sum(dim=-1) + + # Pose prior loss + pose_prior_loss = (pose_prior_weight ** 2) * pose_prior(body_pose, betas) + + # Angle prior for knees and elbows + angle_prior_loss = (angle_prior_weight ** 2) * angle_prior(body_pose).sum(dim=-1) + + # Regularizer to prevent betas from taking large values + shape_prior_loss = (shape_prior_weight ** 2) * (betas ** 2).sum(dim=-1) + + total_loss = reprojection_loss.sum(dim=-1) + pose_prior_loss + angle_prior_loss + shape_prior_loss + + if output == 'sum': + return total_loss.sum() + elif output == 'reprojection': + return reprojection_loss + + +# --- get camera fitting loss ----- +def camera_fitting_loss(model_joints, camera_t, camera_t_est, camera_center, + joints_2d, joints_conf, + focal_length=5000, depth_loss_weight=100): + 
""" + Loss function for camera optimization. + """ + # Project model joints + batch_size = model_joints.shape[0] + rotation = torch.eye(3, device=model_joints.device).unsqueeze(0).expand(batch_size, -1, -1) + projected_joints = perspective_projection(model_joints, rotation, camera_t, + focal_length, camera_center) + + # get the indexed four + op_joints = ['OP RHip', 'OP LHip', 'OP RShoulder', 'OP LShoulder'] + op_joints_ind = [config.JOINT_MAP[joint] for joint in op_joints] + gt_joints = ['RHip', 'LHip', 'RShoulder', 'LShoulder'] + gt_joints_ind = [config.JOINT_MAP[joint] for joint in gt_joints] + + reprojection_error_op = (joints_2d[:, op_joints_ind] - + projected_joints[:, op_joints_ind]) ** 2 + reprojection_error_gt = (joints_2d[:, gt_joints_ind] - + projected_joints[:, gt_joints_ind]) ** 2 + + # Check if for each example in the batch all 4 OpenPose detections are valid, otherwise use the GT detections + # OpenPose joints are more reliable for this task, so we prefer to use them if possible + is_valid = (joints_conf[:, op_joints_ind].min(dim=-1)[0][:, None, None] > 0).float() + reprojection_loss = (is_valid * reprojection_error_op + (1 - is_valid) * reprojection_error_gt).sum(dim=(1, 2)) + + # Loss that penalizes deviation from depth estimate + depth_loss = (depth_loss_weight ** 2) * (camera_t[:, 2] - camera_t_est[:, 2]) ** 2 + + total_loss = reprojection_loss + depth_loss + return total_loss.sum() + + + + # #####--- body fitiing loss ----- +def body_fitting_loss_3d(body_pose, preserve_pose, + betas, model_joints, camera_translation, + j3d, pose_prior, + joints3d_conf, + sigma=100, pose_prior_weight=4.78*1.5, + shape_prior_weight=5.0, angle_prior_weight=15.2, + joint_loss_weight=500.0, + pose_preserve_weight=0.0, + use_collision=False, + model_vertices=None, model_faces=None, + search_tree=None, pen_distance=None, filter_faces=None, + collision_loss_weight=1000 + ): + """ + Loss function for body fitting + """ + batch_size = body_pose.shape[0] + + #joint3d_loss = (joint_loss_weight ** 2) * gmof((model_joints + camera_translation) - j3d, sigma).sum(dim=-1) + + joint3d_error = gmof((model_joints + camera_translation) - j3d, sigma) + + joint3d_loss_part = (joints3d_conf ** 2) * joint3d_error.sum(dim=-1) + joint3d_loss = (joint_loss_weight ** 2) * joint3d_loss_part + + # Pose prior loss + pose_prior_loss = (pose_prior_weight ** 2) * pose_prior(body_pose, betas) + # Angle prior for knees and elbows + angle_prior_loss = (angle_prior_weight ** 2) * angle_prior(body_pose).sum(dim=-1) + # Regularizer to prevent betas from taking large values + shape_prior_loss = (shape_prior_weight ** 2) * (betas ** 2).sum(dim=-1) + + collision_loss = 0.0 + # Calculate the loss due to interpenetration + if use_collision: + triangles = torch.index_select( + model_vertices, 1, + model_faces).view(batch_size, -1, 3, 3) + + with torch.no_grad(): + collision_idxs = search_tree(triangles) + + # Remove unwanted collisions + if filter_faces is not None: + collision_idxs = filter_faces(collision_idxs) + + if collision_idxs.ge(0).sum().item() > 0: + collision_loss = torch.sum(collision_loss_weight * pen_distance(triangles, collision_idxs)) + + pose_preserve_loss = (pose_preserve_weight ** 2) * ((body_pose - preserve_pose) ** 2).sum(dim=-1) + + total_loss = joint3d_loss + pose_prior_loss + angle_prior_loss + shape_prior_loss + collision_loss + pose_preserve_loss + + return total_loss.sum() + + +# #####--- get camera fitting loss ----- +def camera_fitting_loss_3d(model_joints, camera_t, camera_t_est, + j3d, 
joints_category="orig", depth_loss_weight=100.0): + """ + Loss function for camera optimization. + """ + model_joints = model_joints + camera_t + # # get the indexed four + # op_joints = ['OP RHip', 'OP LHip', 'OP RShoulder', 'OP LShoulder'] + # op_joints_ind = [config.JOINT_MAP[joint] for joint in op_joints] + # + # j3d_error_loss = (j3d[:, op_joints_ind] - + # model_joints[:, op_joints_ind]) ** 2 + + gt_joints = ['RHip', 'LHip', 'RShoulder', 'LShoulder'] + gt_joints_ind = [config.JOINT_MAP[joint] for joint in gt_joints] + + if joints_category=="orig": + select_joints_ind = [config.JOINT_MAP[joint] for joint in gt_joints] + elif joints_category=="AMASS": + select_joints_ind = [config.AMASS_JOINT_MAP[joint] for joint in gt_joints] + elif joints_category=="MMM": + select_joints_ind = [config.MMM_JOINT_MAP[joint] for joint in gt_joints] + else: + print("NO SUCH JOINTS CATEGORY!") + + j3d_error_loss = (j3d[:, select_joints_ind] - + model_joints[:, gt_joints_ind]) ** 2 + + # Loss that penalizes deviation from depth estimate + depth_loss = (depth_loss_weight**2) * (camera_t - camera_t_est)**2 + + total_loss = j3d_error_loss + depth_loss + return total_loss.sum() \ No newline at end of file diff --git a/Evaluator_272/mld/transforms/joints2rots/prior.py b/Evaluator_272/mld/transforms/joints2rots/prior.py new file mode 100644 index 0000000000000000000000000000000000000000..d85debddd185d44082f6ac14fdaa606d4deebd40 --- /dev/null +++ b/Evaluator_272/mld/transforms/joints2rots/prior.py @@ -0,0 +1,229 @@ +# -*- coding: utf-8 -*- + +# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is +# holder of all proprietary rights on this computer program. +# You can only use this computer program if you have closed +# a license agreement with MPG or you get the right to use the computer +# program from someone who is authorized to grant you that right. +# Any use of the computer program without a valid license is prohibited and +# liable to prosecution. +# +# Copyright©2019 Max-Planck-Gesellschaft zur Förderung +# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute +# for Intelligent Systems. All rights reserved. 
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+from __future__ import absolute_import
+from __future__ import print_function
+from __future__ import division
+
+import sys
+import os
+
+import time
+import pickle
+
+import numpy as np
+
+import torch
+import torch.nn as nn
+
+DEFAULT_DTYPE = torch.float32
+
+
+def create_prior(prior_type, **kwargs):
+    if prior_type == 'gmm':
+        prior = MaxMixturePrior(**kwargs)
+    elif prior_type == 'l2':
+        return L2Prior(**kwargs)
+    elif prior_type == 'angle':
+        return SMPLifyAnglePrior(**kwargs)
+    elif prior_type == 'none' or prior_type is None:
+        # Don't use any pose prior
+        def no_prior(*args, **kwargs):
+            return 0.0
+        prior = no_prior
+    else:
+        raise ValueError('Prior {}'.format(prior_type) + ' is not implemented')
+    return prior
+
+
+class SMPLifyAnglePrior(nn.Module):
+    def __init__(self, dtype=torch.float32, **kwargs):
+        super(SMPLifyAnglePrior, self).__init__()
+
+        # Indices for the rotation angle of
+        # 55: left elbow,  90deg bend at -np.pi/2
+        # 58: right elbow, 90deg bend at np.pi/2
+        # 12: left knee,   90deg bend at np.pi/2
+        # 15: right knee,  90deg bend at np.pi/2
+        angle_prior_idxs = np.array([55, 58, 12, 15], dtype=np.int64)
+        angle_prior_idxs = torch.tensor(angle_prior_idxs, dtype=torch.long)
+        self.register_buffer('angle_prior_idxs', angle_prior_idxs)
+
+        angle_prior_signs = np.array([1, -1, -1, -1],
+                                     dtype=np.float32 if dtype == torch.float32
+                                     else np.float64)
+        angle_prior_signs = torch.tensor(angle_prior_signs,
+                                         dtype=dtype)
+        self.register_buffer('angle_prior_signs', angle_prior_signs)
+
+    def forward(self, pose, with_global_pose=False):
+        ''' Returns the angle prior loss for the given pose
+        Args:
+            pose: (Bx[23 + 1] * 3) torch tensor with the axis-angle
+                representation of the rotations of the joints of the SMPL model.
+        Kwargs:
+            with_global_pose: Whether the pose vector also contains the global
+                orientation of the SMPL model. If not then the indices must be
+                corrected.
+        Returns:
+            A size (B) tensor containing the angle prior loss for each element
+            in the batch.
+        '''
+        angle_prior_idxs = self.angle_prior_idxs - (not with_global_pose) * 3
+        return torch.exp(pose[:, angle_prior_idxs] *
+                         self.angle_prior_signs).pow(2)
+
+
+class L2Prior(nn.Module):
+    def __init__(self, dtype=DEFAULT_DTYPE, reduction='sum', **kwargs):
+        super(L2Prior, self).__init__()
+
+    def forward(self, module_input, *args):
+        return torch.sum(module_input.pow(2))
+
+
+class MaxMixturePrior(nn.Module):
+
+    def __init__(self, prior_folder='prior',
+                 num_gaussians=6, dtype=DEFAULT_DTYPE, epsilon=1e-16,
+                 use_merged=True,
+                 **kwargs):
+        super(MaxMixturePrior, self).__init__()
+
+        if dtype == DEFAULT_DTYPE:
+            np_dtype = np.float32
+        elif dtype == torch.float64:
+            np_dtype = np.float64
+        else:
+            print('Unknown float type {}, exiting!'.format(dtype))
+            sys.exit(-1)
+
+        self.num_gaussians = num_gaussians
+        self.epsilon = epsilon
+        self.use_merged = use_merged
+        gmm_fn = 'gmm_{:02d}.pkl'.format(num_gaussians)
+
+        full_gmm_fn = os.path.join(prior_folder, gmm_fn)
+        if not os.path.exists(full_gmm_fn):
+            print('The path to the mixture prior "{}"'.format(full_gmm_fn) +
+                  ' does not exist, exiting!')
+            sys.exit(-1)
+
+        with open(full_gmm_fn, 'rb') as f:
+            gmm = pickle.load(f, encoding='latin1')
+
+        if type(gmm) == dict:
+            means = gmm['means'].astype(np_dtype)
+            covs = gmm['covars'].astype(np_dtype)
+            weights = gmm['weights'].astype(np_dtype)
+        elif 'sklearn.mixture.gmm.GMM' in str(type(gmm)):
+            means = gmm.means_.astype(np_dtype)
+            covs = gmm.covars_.astype(np_dtype)
+            weights = gmm.weights_.astype(np_dtype)
+        else:
+            print('Unknown type for the prior: {}, exiting!'.format(type(gmm)))
+            sys.exit(-1)
+
+        self.register_buffer('means', torch.tensor(means, dtype=dtype))
+
+        self.register_buffer('covs', torch.tensor(covs, dtype=dtype))
+
+        precisions = [np.linalg.inv(cov) for cov in covs]
+        precisions = np.stack(precisions).astype(np_dtype)
+
+        self.register_buffer('precisions',
+                             torch.tensor(precisions, dtype=dtype))
+
+        # The constant term:
+        sqrdets = np.array([(np.sqrt(np.linalg.det(c)))
+                            for c in gmm['covars']])
+        const = (2 * np.pi)**(69 / 2.)
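+        # Descriptive note: the per-component negative-log-likelihood weights
+        # below divide each mixture weight by the Gaussian normalizer
+        # (2*pi)^(69/2) (69 = dimension of the body pose vector) and by the
+        # sqrt-determinant of its covariance, rescaled by the smallest
+        # sqrt-determinant for numerical stability. Taking the min over
+        # components later approximates the mixture by its best component.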
+ + nll_weights = np.asarray(gmm['weights'] / (const * + (sqrdets / sqrdets.min()))) + nll_weights = torch.tensor(nll_weights, dtype=dtype).unsqueeze(dim=0) + self.register_buffer('nll_weights', nll_weights) + + weights = torch.tensor(gmm['weights'], dtype=dtype).unsqueeze(dim=0) + self.register_buffer('weights', weights) + + self.register_buffer('pi_term', + torch.log(torch.tensor(2 * np.pi, dtype=dtype))) + + cov_dets = [np.log(np.linalg.det(cov.astype(np_dtype)) + epsilon) + for cov in covs] + self.register_buffer('cov_dets', + torch.tensor(cov_dets, dtype=dtype)) + + # The dimensionality of the random variable + self.random_var_dim = self.means.shape[1] + + def get_mean(self): + ''' Returns the mean of the mixture ''' + mean_pose = torch.matmul(self.weights, self.means) + return mean_pose + + def merged_log_likelihood(self, pose, betas): + diff_from_mean = pose.unsqueeze(dim=1) - self.means + + prec_diff_prod = torch.einsum('mij,bmj->bmi', + [self.precisions, diff_from_mean]) + diff_prec_quadratic = (prec_diff_prod * diff_from_mean).sum(dim=-1) + + curr_loglikelihood = 0.5 * diff_prec_quadratic - \ + torch.log(self.nll_weights) + # curr_loglikelihood = 0.5 * (self.cov_dets.unsqueeze(dim=0) + + # self.random_var_dim * self.pi_term + + # diff_prec_quadratic + # ) - torch.log(self.weights) + + min_likelihood, _ = torch.min(curr_loglikelihood, dim=1) + return min_likelihood + + def log_likelihood(self, pose, betas, *args, **kwargs): + ''' Create graph operation for negative log-likelihood calculation + ''' + likelihoods = [] + + for idx in range(self.num_gaussians): + mean = self.means[idx] + prec = self.precisions[idx] + cov = self.covs[idx] + diff_from_mean = pose - mean + + curr_loglikelihood = torch.einsum('bj,ji->bi', + [diff_from_mean, prec]) + curr_loglikelihood = torch.einsum('bi,bi->b', + [curr_loglikelihood, + diff_from_mean]) + cov_term = torch.log(torch.det(cov) + self.epsilon) + curr_loglikelihood += 0.5 * (cov_term + + self.random_var_dim * + self.pi_term) + likelihoods.append(curr_loglikelihood) + + log_likelihoods = torch.stack(likelihoods, dim=1) + min_idx = torch.argmin(log_likelihoods, dim=1) + weight_component = self.nll_weights[:, min_idx] + weight_component = -torch.log(weight_component) + + return weight_component + log_likelihoods[:, min_idx] + + def forward(self, pose, betas): + if self.use_merged: + return self.merged_log_likelihood(pose, betas) + else: + return self.log_likelihood(pose, betas) diff --git a/Evaluator_272/mld/transforms/joints2rots/smplify.py b/Evaluator_272/mld/transforms/joints2rots/smplify.py new file mode 100644 index 0000000000000000000000000000000000000000..7df51503a4a46a479a508c9fdf362cb063b93742 --- /dev/null +++ b/Evaluator_272/mld/transforms/joints2rots/smplify.py @@ -0,0 +1,284 @@ +import torch +import os, sys +import pickle +import smplx +import numpy as np +from tqdm import tqdm + +sys.path.append(os.path.dirname(__file__)) +from customloss import (camera_fitting_loss, + body_fitting_loss, + camera_fitting_loss_3d, + body_fitting_loss_3d, + ) +from prior import MaxMixturePrior +import config + + + +@torch.no_grad() +def guess_init_3d(model_joints, + j3d, + joints_category="orig"): + """Initialize the camera translation via triangle similarity, by using the torso joints . 
+    :param model_joints: SMPL model with pre joints
+    :param j3d: 25x3 array of Kinect Joints
+    :returns: 3D vector corresponding to the estimated camera translation
+    """
+    # get the indexed four
+    gt_joints = ['RHip', 'LHip', 'RShoulder', 'LShoulder']
+    gt_joints_ind = [config.JOINT_MAP[joint] for joint in gt_joints]
+
+    if joints_category=="orig":
+        joints_ind_category = [config.JOINT_MAP[joint] for joint in gt_joints]
+    elif joints_category=="AMASS":
+        joints_ind_category = [config.AMASS_JOINT_MAP[joint] for joint in gt_joints]
+    elif joints_category=="MMM":
+        joints_ind_category = [config.MMM_JOINT_MAP[joint] for joint in gt_joints]
+    else:
+        print("NO SUCH JOINTS CATEGORY!")
+
+    sum_init_t = (j3d[:, joints_ind_category] - model_joints[:, gt_joints_ind]).sum(dim=1)
+    init_t = sum_init_t / 4.0
+    return init_t
+
+
+# SMPLify 3D
+class SMPLify3D():
+    """Implementation of SMPLify, using 3D joints."""
+
+    def __init__(self,
+                 smplxmodel,
+                 step_size=1e-2,
+                 batch_size=1,
+                 num_iters=100,
+                 use_collision=False,
+                 use_lbfgs=True,
+                 joints_category="orig",
+                 device=torch.device('cuda:0'),
+                 ):
+
+        # Store options
+        self.batch_size = batch_size
+        self.device = device
+        self.step_size = step_size
+
+        self.num_iters = num_iters
+        # --- choose optimizer
+        self.use_lbfgs = use_lbfgs
+        # GMM pose prior
+        self.pose_prior = MaxMixturePrior(prior_folder=config.GMM_MODEL_DIR,
+                                          num_gaussians=8,
+                                          dtype=torch.float32).to(device)
+        # collision part
+        self.use_collision = use_collision
+        if self.use_collision:
+            self.part_segm_fn = config.Part_Seg_DIR
+
+        # Load the SMPL-X model
+        self.smpl = smplxmodel
+
+        self.model_faces = smplxmodel.faces_tensor.view(-1)
+
+        # select joint category
+        self.joints_category = joints_category
+
+        if joints_category=="orig":
+            self.smpl_index = config.full_smpl_idx
+            self.corr_index = config.full_smpl_idx
+        elif joints_category=="AMASS":
+            self.smpl_index = config.amass_smpl_idx
+            self.corr_index = config.amass_idx
+        # elif joints_category=="MMM":
+        #     self.smpl_index = config.mmm_smpl_dix
+        #     self.corr_index = config.mmm_idx
+        else:
+            self.smpl_index = None
+            self.corr_index = None
+            print("NO SUCH JOINTS CATEGORY!")
+
+    # ---- get the main function here ------
+    def __call__(self, init_pose, init_betas, init_cam_t, j3d, conf_3d=1.0, seq_ind=0):
+        """Perform body fitting.
+ Input: + init_pose: SMPL pose estimate + init_betas: SMPL betas estimate + init_cam_t: Camera translation estimate + j3d: joints 3d aka keypoints + conf_3d: confidence for 3d joints + seq_ind: index of the sequence + Returns: + vertices: Vertices of optimized shape + joints: 3D joints of optimized shape + pose: SMPL pose parameters of optimized shape + betas: SMPL beta parameters of optimized shape + camera_translation: Camera translation + """ + + # # # add the mesh inter-section to avoid + search_tree = None + pen_distance = None + filter_faces = None + + if self.use_collision: + from mesh_intersection.bvh_search_tree import BVH + import mesh_intersection.loss as collisions_loss + from mesh_intersection.filter_faces import FilterFaces + + search_tree = BVH(max_collisions=8) + + pen_distance = collisions_loss.DistanceFieldPenetrationLoss( + sigma=0.5, point2plane=False, vectorized=True, penalize_outside=True) + + if self.part_segm_fn: + # Read the part segmentation + part_segm_fn = os.path.expandvars(self.part_segm_fn) + with open(part_segm_fn, 'rb') as faces_parents_file: + face_segm_data = pickle.load(faces_parents_file, encoding='latin1') + faces_segm = face_segm_data['segm'] + faces_parents = face_segm_data['parents'] + # Create the module used to filter invalid collision pairs + filter_faces = FilterFaces( + faces_segm=faces_segm, faces_parents=faces_parents, + ign_part_pairs=None).to(device=self.device) + + + # Split SMPL pose to body pose and global orientation + body_pose = init_pose[:, 3:].detach().clone() + global_orient = init_pose[:, :3].detach().clone() + betas = init_betas.detach().clone() + + # use guess 3d to get the initial + smpl_output = self.smpl(global_orient=global_orient, + body_pose=body_pose, + betas=betas) + model_joints = smpl_output.joints + + init_cam_t = guess_init_3d(model_joints, j3d, self.joints_category).detach() + camera_translation = init_cam_t.clone() + + preserve_pose = init_pose[:, 3:].detach().clone() + # -------------Step 1: Optimize camera translation and body orientation-------- + # Optimize only camera translation and body orientation + body_pose.requires_grad = False + betas.requires_grad = False + global_orient.requires_grad = True + camera_translation.requires_grad = True + + camera_opt_params = [global_orient, camera_translation] + + if self.use_lbfgs: + camera_optimizer = torch.optim.LBFGS(camera_opt_params, max_iter=self.num_iters, + lr=self.step_size, line_search_fn='strong_wolfe') + for i in range(10): + def closure(): + camera_optimizer.zero_grad() + smpl_output = self.smpl(global_orient=global_orient, + body_pose=body_pose, + betas=betas) + model_joints = smpl_output.joints + + loss = camera_fitting_loss_3d(model_joints, camera_translation, + init_cam_t, j3d, self.joints_category) + loss.backward() + return loss + + camera_optimizer.step(closure) + else: + camera_optimizer = torch.optim.Adam(camera_opt_params, lr=self.step_size, betas=(0.9, 0.999)) + + for i in range(20): + smpl_output = self.smpl(global_orient=global_orient, + body_pose=body_pose, + betas=betas) + model_joints = smpl_output.joints + + loss = camera_fitting_loss_3d(model_joints[:, self.smpl_index], camera_translation, + init_cam_t, j3d[:, self.corr_index], self.joints_category) + camera_optimizer.zero_grad() + loss.backward() + camera_optimizer.step() + + # Fix camera translation after optimizing camera + # --------Step 2: Optimize body joints -------------------------- + # Optimize only the body pose and global orientation of the body + body_pose.requires_grad = 
True + global_orient.requires_grad = True + camera_translation.requires_grad = True + + # --- if we use the sequence, fix the shape + if seq_ind == 0: + betas.requires_grad = True + body_opt_params = [body_pose, betas, global_orient, camera_translation] + else: + betas.requires_grad = False + body_opt_params = [body_pose, global_orient, camera_translation] + + if self.use_lbfgs: + body_optimizer = torch.optim.LBFGS(body_opt_params, max_iter=self.num_iters, + lr=self.step_size, line_search_fn='strong_wolfe') + + for i in tqdm(range(self.num_iters), desc=f"LBFGS iter: "): + # for i in range(self.num_iters): + def closure(): + body_optimizer.zero_grad() + smpl_output = self.smpl(global_orient=global_orient, + body_pose=body_pose, + betas=betas) + model_joints = smpl_output.joints + model_vertices = smpl_output.vertices + + loss = body_fitting_loss_3d(body_pose, preserve_pose, betas, model_joints[:, self.smpl_index], camera_translation, + j3d[:, self.corr_index], self.pose_prior, + joints3d_conf=conf_3d, + joint_loss_weight=600.0, + pose_preserve_weight=5.0, + use_collision=self.use_collision, + model_vertices=model_vertices, model_faces=self.model_faces, + search_tree=search_tree, pen_distance=pen_distance, filter_faces=filter_faces) + loss.backward() + return loss + + body_optimizer.step(closure) + else: + body_optimizer = torch.optim.Adam(body_opt_params, lr=self.step_size, betas=(0.9, 0.999)) + + for i in range(self.num_iters): + smpl_output = self.smpl(global_orient=global_orient, + body_pose=body_pose, + betas=betas) + model_joints = smpl_output.joints + model_vertices = smpl_output.vertices + + loss = body_fitting_loss_3d(body_pose, preserve_pose, betas, model_joints[:, self.smpl_index], camera_translation, + j3d[:, self.corr_index], self.pose_prior, + joints3d_conf=conf_3d, + joint_loss_weight=600.0, + use_collision=self.use_collision, + model_vertices=model_vertices, model_faces=self.model_faces, + search_tree=search_tree, pen_distance=pen_distance, filter_faces=filter_faces) + body_optimizer.zero_grad() + loss.backward() + body_optimizer.step() + + # Get final loss value + with torch.no_grad(): + smpl_output = self.smpl(global_orient=global_orient, + body_pose=body_pose, + betas=betas, return_full_pose=True) + model_joints = smpl_output.joints + model_vertices = smpl_output.vertices + + final_loss = body_fitting_loss_3d(body_pose, preserve_pose, betas, model_joints[:, self.smpl_index], camera_translation, + j3d[:, self.corr_index], self.pose_prior, + joints3d_conf=conf_3d, + joint_loss_weight=600.0, + use_collision=self.use_collision, model_vertices=model_vertices, model_faces=self.model_faces, + search_tree=search_tree, pen_distance=pen_distance, filter_faces=filter_faces) + + vertices = smpl_output.vertices.detach() + joints = smpl_output.joints.detach() + pose = torch.cat([global_orient, body_pose], dim=-1).detach() + betas = betas.detach() + + return vertices, joints, pose, betas, camera_translation, final_loss \ No newline at end of file diff --git a/Evaluator_272/mld/transforms/rotation2xyz.py b/Evaluator_272/mld/transforms/rotation2xyz.py new file mode 100644 index 0000000000000000000000000000000000000000..8a62fbe7eff18cae14c4768084a41cc375914198 --- /dev/null +++ b/Evaluator_272/mld/transforms/rotation2xyz.py @@ -0,0 +1,114 @@ +# This code is based on https://github.com/Mathux/ACTOR.git +import torch +import mld.utils.rotation_conversions as geometry + +from .smpl import SMPL, JOINTSTYPE_ROOT +# from .get_model import JOINTSTYPES +JOINTSTYPES = ["a2m", "a2mpl", "smpl", 
"vibe", "vertices"] + + +class Rotation2xyz(torch.nn.Module): + + def __init__(self, smpl_path): + super().__init__() + self.smpl_model = SMPL(smpl_path).eval() + + def __call__(self, + x, + mask, + pose_rep, + translation, + glob, + jointstype, + vertstrans, + betas=None, + beta=0, + glob_rot=None, + get_rotations_back=False, + **kwargs): + if pose_rep == "xyz": + return x + + if mask is None: + mask = torch.ones((x.shape[0], x.shape[-1]), + dtype=bool, + device=x.device) + + if not glob and glob_rot is None: + raise TypeError( + "You must specify global rotation if glob is False") + + if jointstype not in JOINTSTYPES: + raise NotImplementedError("This jointstype is not implemented.") + + if translation: + x_translations = x[:, -1, :3] + x_rotations = x[:, :-1] + else: + x_rotations = x + + x_rotations = x_rotations.permute(0, 3, 1, 2) + nsamples, time, njoints, feats = x_rotations.shape + + # Compute rotations (convert only masked sequences output) + if pose_rep == "rotvec": + rotations = geometry.axis_angle_to_matrix(x_rotations[mask]) + elif pose_rep == "rotmat": + rotations = x_rotations[mask].view(-1, njoints, 3, 3) + elif pose_rep == "rotquat": + rotations = geometry.quaternion_to_matrix(x_rotations[mask]) + elif pose_rep == "rot6d": + rotations = geometry.rotation_6d_to_matrix(x_rotations[mask]) + else: + raise NotImplementedError("No geometry for this one.") + + if not glob: + global_orient = torch.tensor(glob_rot, device=x.device) + global_orient = geometry.axis_angle_to_matrix(global_orient).view( + 1, 1, 3, 3) + global_orient = global_orient.repeat(len(rotations), 1, 1, 1) + else: + global_orient = rotations[:, 0] + rotations = rotations[:, 1:] + + if betas is None: + betas = torch.zeros( + [rotations.shape[0], self.smpl_model.num_betas], + dtype=rotations.dtype, + device=rotations.device) + betas[:, 1] = beta + + out = self.smpl_model(body_pose=rotations, + global_orient=global_orient, + betas=betas) + + # get the desirable joints + joints = out[jointstype] + + x_xyz = torch.empty(nsamples, + time, + joints.shape[1], + 3, + device=x.device, + dtype=x.dtype) + x_xyz[~mask] = 0 + x_xyz[mask] = joints + + x_xyz = x_xyz.permute(0, 2, 3, 1).contiguous() + + # the first translation root at the origin on the prediction + if jointstype != "vertices": + rootindex = JOINTSTYPE_ROOT[jointstype] + x_xyz = x_xyz - x_xyz[:, [rootindex], :, :] + + if translation and vertstrans: + # the first translation root at the origin + x_translations = x_translations - x_translations[:, :, [0]] + + # add the translation to all the joints + x_xyz = x_xyz + x_translations[:, None, :, :] + + if get_rotations_back: + return x_xyz, rotations, global_orient + else: + return x_xyz diff --git a/Evaluator_272/mld/transforms/rots2joints/__init__.py b/Evaluator_272/mld/transforms/rots2joints/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2755abfcdfeccf7866ce8dac0d165c5d13c94d4d --- /dev/null +++ b/Evaluator_272/mld/transforms/rots2joints/__init__.py @@ -0,0 +1,2 @@ +from .base import Rots2Joints +from .smplh import SMPLH diff --git a/Evaluator_272/mld/transforms/rots2joints/base.py b/Evaluator_272/mld/transforms/rots2joints/base.py new file mode 100644 index 0000000000000000000000000000000000000000..d683dd604be755e73ee5906d2bc9bc429216d95a --- /dev/null +++ b/Evaluator_272/mld/transforms/rots2joints/base.py @@ -0,0 +1,34 @@ +from typing import Optional + +import torch +from torch import Tensor, nn +from pathlib import Path + + +class Rots2Joints(nn.Module): + def __init__(self, 
path: Optional[str] = None, + normalization: bool = False, + eps: float = 1e-12, + **kwargs) -> None: + if normalization and path is None: + raise TypeError("You should provide a path if normalization is on.") + + super().__init__() + self.normalization = normalization + self.eps = eps + + if normalization: + mean_path = Path(path) / "mean.pt" + std_path = Path(path) / "std.pt" + self.register_buffer('mean', torch.load(mean_path)) + self.register_buffer('std', torch.load(std_path)) + + def normalize(self, features: Tensor) -> Tensor: + if self.normalization: + features = (features - self.mean)/(self.std + self.eps) + return features + + def unnormalize(self, features: Tensor) -> Tensor: + if self.normalization: + features = features * self.std + self.mean + return features diff --git a/Evaluator_272/mld/transforms/rots2joints/smplh.py b/Evaluator_272/mld/transforms/rots2joints/smplh.py new file mode 100644 index 0000000000000000000000000000000000000000..bab75a20d9eecac1375fbd435788e5c9deaa0b6f --- /dev/null +++ b/Evaluator_272/mld/transforms/rots2joints/smplh.py @@ -0,0 +1,175 @@ +import contextlib +from typing import Optional + +import torch +from einops import rearrange +from torch import Tensor +from .base import Rots2Joints + + +def slice_or_none(data, cslice): + if data is None: + return data + else: + return data[cslice] + + +class SMPLH(Rots2Joints): + + def __init__(self, + path: str, + jointstype: str = "mmm", + input_pose_rep: str = "matrix", + batch_size: int = 512, + gender="neutral", + **kwargs) -> None: + super().__init__(path=None, normalization=False) + self.batch_size = batch_size + self.input_pose_rep = input_pose_rep + self.jointstype = jointstype + self.training = False + + from smplx.body_models import SMPLHLayer + + # Remove annoying print + with contextlib.redirect_stdout(None): + self.smplh = SMPLHLayer(path, ext="npz", gender=gender).eval() + + self.faces = self.smplh.faces + for p in self.parameters(): + p.requires_grad = False + + def train(self, *args, **kwargs): + return self + + def forward(self, + smpl_data: dict, + jointstype: Optional[str] = None, + input_pose_rep: Optional[str] = None, + batch_size: Optional[int] = None) -> Tensor: + + # Take values from init if not specified there + jointstype = self.jointstype if jointstype is None else jointstype + batch_size = self.batch_size if batch_size is None else batch_size + input_pose_rep = self.input_pose_rep if input_pose_rep is None else input_pose_rep + + if input_pose_rep == "xyz": + raise NotImplementedError( + "You should use identity pose2joints instead") + + poses = smpl_data.rots + trans = smpl_data.trans + + from functools import reduce + import operator + save_shape_bs_len = poses.shape[:-3] + nposes = reduce(operator.mul, save_shape_bs_len, 1) + + if poses.shape[-3] == 52: + nohands = False + elif poses.shape[-3] == 22: + nohands = True + else: + raise NotImplementedError("Could not parse the poses.") + + # Convert any rotations to matrix + # from mld.tools.easyconvert import to_matrix + # matrix_poses = to_matrix(input_pose_rep, poses) + matrix_poses = poses + + # Reshaping + matrix_poses = matrix_poses.reshape((nposes, *matrix_poses.shape[-3:])) + global_orient = matrix_poses[:, 0] + + if trans is None: + trans = torch.zeros((*save_shape_bs_len, 3), + dtype=poses.dtype, + device=poses.device) + + trans_all = trans.reshape((nposes, *trans.shape[-1:])) + + body_pose = matrix_poses[:, 1:22] + if nohands: + from mld.tools.easyconvert import to_matrix + # still axis angle + left_hand_pose = 
self.smplh.left_hand_mean.reshape(15, 3) + left_hand_pose = to_matrix("axisangle", left_hand_pose) + left_hand_pose = left_hand_pose[None].repeat((nposes, 1, 1, 1)) + + right_hand_pose = self.smplh.right_hand_mean.reshape(15, 3) + right_hand_pose = to_matrix("axisangle", right_hand_pose) + right_hand_pose = right_hand_pose[None].repeat((nposes, 1, 1, 1)) + else: + hand_pose = matrix_poses[:, 22:] + left_hand_pose = hand_pose[:, :15] + right_hand_pose = hand_pose[:, 15:] + + n = len(body_pose) + outputs = [] + for chunk in range(int((n - 1) / batch_size) + 1): + chunk_slice = slice(chunk * batch_size, (chunk + 1) * batch_size) + smpl_output = self.smplh( + global_orient=slice_or_none(global_orient, chunk_slice), + body_pose=slice_or_none(body_pose, chunk_slice), + left_hand_pose=slice_or_none(left_hand_pose, chunk_slice), + right_hand_pose=slice_or_none(right_hand_pose, chunk_slice), + transl=slice_or_none(trans_all, chunk_slice)) + + if jointstype == "vertices": + output_chunk = smpl_output.vertices + else: + joints = smpl_output.joints + output_chunk = joints + outputs.append(output_chunk) + + outputs = torch.cat(outputs) + outputs = outputs.reshape((*save_shape_bs_len, *outputs.shape[1:])) + + # Change topology if needed + outputs = smplh_to(jointstype, outputs, trans) + return outputs + + def inverse(self, joints: Tensor) -> Tensor: + raise NotImplementedError("Cannot inverse SMPLH layer.") + + +def smplh_to(jointstype, data, trans): + from mld.utils.joints import get_root_idx + + if "mmm" in jointstype: + from mld.utils.joints import smplh2mmm_indexes + indexes = smplh2mmm_indexes + data = data[..., indexes, :] + + # make it compatible with mmm + if jointstype == "mmm": + from mld.utils.joints import smplh_to_mmm_scaling_factor + data *= smplh_to_mmm_scaling_factor + + if jointstype == "smplmmm": + pass + elif jointstype in ["mmm", "mmmns"]: + # swap axis + data = data[..., [1, 2, 0]] + # revert left and right + data[..., 2] = -data[..., 2] + + elif jointstype == "smplnh": + from mld.utils.joints import smplh2smplnh_indexes + indexes = smplh2smplnh_indexes + data = data[..., indexes, :] + elif jointstype == "smplh": + pass + elif jointstype == "vertices": + pass + else: + raise NotImplementedError(f"SMPLH to {jointstype} is not implemented.") + + if jointstype != "vertices": + # shift the output in each batch + # such that it is centered on the pelvis/root on the first frame + root_joint_idx = get_root_idx(jointstype) + shift = trans[..., 0, :] - data[..., 0, root_joint_idx, :] + data += shift[..., None, None, :] + + return data diff --git a/Evaluator_272/mld/transforms/rots2rfeats/__init__.py b/Evaluator_272/mld/transforms/rots2rfeats/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0ff007f2f014b8264971f532ebab44f45f2c7b90 --- /dev/null +++ b/Evaluator_272/mld/transforms/rots2rfeats/__init__.py @@ -0,0 +1,2 @@ +from .base import Rots2Rfeats +from .smplvelp import SMPLVelP diff --git a/Evaluator_272/mld/transforms/rots2rfeats/base.py b/Evaluator_272/mld/transforms/rots2rfeats/base.py new file mode 100644 index 0000000000000000000000000000000000000000..c9a98c598676a2cb731ea843e202b11c4f928ef5 --- /dev/null +++ b/Evaluator_272/mld/transforms/rots2rfeats/base.py @@ -0,0 +1,34 @@ +from typing import Optional + +import torch +from torch import Tensor, nn +from pathlib import Path + + +class Rots2Rfeats(nn.Module): + def __init__(self, path: Optional[str] = None, + normalization: bool = False, + eps: float = 1e-12, + **kwargs) -> None: + if normalization and 
path is None:
+            raise TypeError("You should provide a path if normalization is on.")
+
+        super().__init__()
+        self.normalization = normalization
+        self.eps = eps
+
+        if normalization:
+            mean_path = Path(path) / "rfeats_mean.pt"
+            std_path = Path(path) / "rfeats_std.pt"
+            self.register_buffer('mean', torch.load(mean_path))
+            self.register_buffer('std', torch.load(std_path))
+
+    def normalize(self, features: Tensor) -> Tensor:
+        if self.normalization:
+            features = (features - self.mean)/(self.std + self.eps)
+        return features
+
+    def unnormalize(self, features: Tensor) -> Tensor:
+        if self.normalization:
+            features = features * self.std + self.mean
+        return features
diff --git a/Evaluator_272/mld/transforms/rots2rfeats/smplvelp.py b/Evaluator_272/mld/transforms/rots2rfeats/smplvelp.py
new file mode 100644
index 0000000000000000000000000000000000000000..0b4355ad868bd37c187dcd57b4ac25cdd6c5f72b
--- /dev/null
+++ b/Evaluator_272/mld/transforms/rots2rfeats/smplvelp.py
@@ -0,0 +1,101 @@
+from typing import Optional
+
+import torch
+from torch import Tensor
+from einops import rearrange
+
+from mld.utils.temos_utils import matrix_to, nfeats_of, to_matrix
+import mld.utils.geometry as geometry
+
+from .base import Rots2Rfeats
+
+
+class SMPLVelP(Rots2Rfeats):
+
+    def __init__(self,
+                 path: Optional[str] = None,
+                 normalization: bool = False,
+                 pose_rep: str = "rot6d",
+                 canonicalize: bool = False,
+                 offset: bool = True,
+                 **kwargs) -> None:
+        super().__init__(path=path, normalization=normalization)
+        self.canonicalize = canonicalize
+        self.pose_rep = pose_rep
+        self.nfeats = nfeats_of(pose_rep)
+        self.offset = offset
+
+    def forward(self, data) -> Tensor:
+        matrix_poses, trans = data.rots, data.trans
+        # matrix_poses: [nframes, 22, 3, 3]
+
+        # Extract the root gravity axis
+        # for smpl it is the last coordinate
+        root_y = trans[..., 2]
+        trajectory = trans[..., [0, 1]]
+
+        # Compute the difference of the trajectory (for the X and Y axes)
+        vel_trajectory = torch.diff(trajectory, dim=-2)
+        # 0 for the first one => keep the dimensionality
+        vel_trajectory = torch.cat(
+            (0 * vel_trajectory[..., [0], :], vel_trajectory), dim=-2)
+
+        # first normalize the data
+        if self.canonicalize:
+            global_orient = matrix_poses[..., 0, :, :]
+            # remove the rotation
+            rot2d = geometry.matrix_to_axis_angle(global_orient[..., 0, :, :])
+            # Remove the first rotation along the vertical axis
+            # construct this by extracting only the vertical component of the rotation
+            rot2d[..., :2] = 0
+
+            if self.offset:
+                # add a bit more rotation
+                rot2d[..., 2] += torch.pi / 2
+
+            rot2d = geometry.axis_angle_to_matrix(rot2d)
+
+            # turn all the rotations by the same amount
+            global_orient = torch.einsum("...kj,...kl->...jl", rot2d,
+                                         global_orient)
+
+            matrix_poses = torch.cat(
+                (global_orient[..., None, :, :], matrix_poses[..., 1:, :, :]),
+                dim=-3)
+
+            # Turn the trajectory as well
+            vel_trajectory = torch.einsum("...kj,...lk->...lj",
+                                          rot2d[..., :2, :2], vel_trajectory)
+
+        poses = matrix_to(self.pose_rep, matrix_poses)
+        features = torch.cat(
+            (root_y[..., None], vel_trajectory,
+             rearrange(poses, "... joints rot -> ... (joints rot)")),
+            dim=-1)
+        features = self.normalize(features)
+        return features
+
+    def extract(self, features):
+        root_y = features[..., 0]
+        vel_trajectory = features[..., 1:3]
+        poses_features = features[..., 3:]
+        poses = rearrange(poses_features,
+                          "... (joints rot) -> ...
joints rot", + rot=self.nfeats) + return root_y, vel_trajectory, poses + + def inverse(self, features): + features = self.unnormalize(features) + root_y, vel_trajectory, poses = self.extract(features) + + # integrate the trajectory + trajectory = torch.cumsum(vel_trajectory, dim=-2) + # First frame should be 0, but if infered it is better to ensure it + trajectory = trajectory - trajectory[..., [0], :] + + # Get back the translation + trans = torch.cat([trajectory, root_y[..., None]], dim=-1) + matrix_poses = to_matrix(self.pose_rep, poses) + + from temos.transforms.smpl import RotTransDatastruct + return RotTransDatastruct(rots=matrix_poses, trans=trans) diff --git a/Evaluator_272/mld/transforms/smpl.py b/Evaluator_272/mld/transforms/smpl.py new file mode 100644 index 0000000000000000000000000000000000000000..f83c5ff18af981164950728f2bbf7e57214652c9 --- /dev/null +++ b/Evaluator_272/mld/transforms/smpl.py @@ -0,0 +1,253 @@ +from typing import Optional +from torch import Tensor +import numpy as np +import torch +import contextlib +from .base import Datastruct, dataclass, Transform +import os +from .rots2rfeats import Rots2Rfeats +from .rots2joints import Rots2Joints +from .joints2jfeats import Joints2Jfeats + + +class SMPLTransform(Transform): + + def __init__(self, rots2rfeats: Rots2Rfeats, rots2joints: Rots2Joints, + joints2jfeats: Joints2Jfeats, **kwargs): + self.rots2rfeats = rots2rfeats + self.rots2joints = rots2joints + self.joints2jfeats = joints2jfeats + + def Datastruct(self, **kwargs): + return SMPLDatastruct(_rots2rfeats=self.rots2rfeats, + _rots2joints=self.rots2joints, + _joints2jfeats=self.joints2jfeats, + transforms=self, + **kwargs) + + def __repr__(self): + return "SMPLTransform()" + + +class RotIdentityTransform(Transform): + + def __init__(self, **kwargs): + return + + def Datastruct(self, **kwargs): + return RotTransDatastruct(**kwargs) + + def __repr__(self): + return "RotIdentityTransform()" + + +@dataclass +class RotTransDatastruct(Datastruct): + rots: Tensor + trans: Tensor + + transforms: RotIdentityTransform = RotIdentityTransform() + + def __post_init__(self): + self.datakeys = ["rots", "trans"] + + def __len__(self): + return len(self.rots) + + +@dataclass +class SMPLDatastruct(Datastruct): + transforms: SMPLTransform + _rots2rfeats: Rots2Rfeats + _rots2joints: Rots2Joints + _joints2jfeats: Joints2Jfeats + + features: Optional[Tensor] = None + rots_: Optional[RotTransDatastruct] = None + rfeats_: Optional[Tensor] = None + joints_: Optional[Tensor] = None + jfeats_: Optional[Tensor] = None + + def __post_init__(self): + self.datakeys = ["features", "rots_", "rfeats_", "joints_", "jfeats_"] + # starting point + if self.features is not None and self.rfeats_ is None: + self.rfeats_ = self.features + + @property + def rots(self): + # Cached value + if self.rots_ is not None: + return self.rots_ + + # self.rfeats_ should be defined + assert self.rfeats_ is not None + + self._rots2rfeats.to(self.rfeats.device) + self.rots_ = self._rots2rfeats.inverse(self.rfeats) + return self.rots_ + + @property + def rfeats(self): + # Cached value + if self.rfeats_ is not None: + return self.rfeats_ + + # self.rots_ should be defined + assert self.rots_ is not None + + self._rots2rfeats.to(self.rots.device) + self.rfeats_ = self._rots2rfeats(self.rots) + return self.rfeats_ + + @property + def joints(self): + # Cached value + if self.joints_ is not None: + return self.joints_ + + self._rots2joints.to(self.rots.device) + self.joints_ = self._rots2joints(self.rots) + return 
self.joints_ + + @property + def jfeats(self): + # Cached value + if self.jfeats_ is not None: + return self.jfeats_ + + self._joints2jfeats.to(self.joints.device) + self.jfeats_ = self._joints2jfeats(self.joints) + return self.jfeats_ + + def __len__(self): + return len(self.rfeats) + + +# This code is based on https://github.com/Mathux/ACTOR.git +from smplx import SMPLLayer as _SMPLLayer +from smplx.lbs import vertices2joints + +# action2motion_joints = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 21, 24, 38] +# change 0 and 8 +action2motion_joints = [ + 8, 1, 2, 3, 4, 5, 6, 7, 0, 9, 10, 11, 12, 13, 14, 21, 24, 38 +] + +SMPL_DATA_PATH = 'deps/smpl' + +JOINTSTYPE_ROOT = { + "a2m": 0, # action2motion + "smpl": 0, + "a2mpl": 0, # set(smpl, a2m) + "vibe": 8 +} # 0 is the 8 position: OP MidHip below + +JOINT_MAP = { + 'OP Nose': 24, + 'OP Neck': 12, + 'OP RShoulder': 17, + 'OP RElbow': 19, + 'OP RWrist': 21, + 'OP LShoulder': 16, + 'OP LElbow': 18, + 'OP LWrist': 20, + 'OP MidHip': 0, + 'OP RHip': 2, + 'OP RKnee': 5, + 'OP RAnkle': 8, + 'OP LHip': 1, + 'OP LKnee': 4, + 'OP LAnkle': 7, + 'OP REye': 25, + 'OP LEye': 26, + 'OP REar': 27, + 'OP LEar': 28, + 'OP LBigToe': 29, + 'OP LSmallToe': 30, + 'OP LHeel': 31, + 'OP RBigToe': 32, + 'OP RSmallToe': 33, + 'OP RHeel': 34, + 'Right Ankle': 8, + 'Right Knee': 5, + 'Right Hip': 45, + 'Left Hip': 46, + 'Left Knee': 4, + 'Left Ankle': 7, + 'Right Wrist': 21, + 'Right Elbow': 19, + 'Right Shoulder': 17, + 'Left Shoulder': 16, + 'Left Elbow': 18, + 'Left Wrist': 20, + 'Neck (LSP)': 47, + 'Top of Head (LSP)': 48, + 'Pelvis (MPII)': 49, + 'Thorax (MPII)': 50, + 'Spine (H36M)': 51, + 'Jaw (H36M)': 52, + 'Head (H36M)': 53, + 'Nose': 24, + 'Left Eye': 26, + 'Right Eye': 25, + 'Left Ear': 28, + 'Right Ear': 27 +} + +JOINT_NAMES = [ + 'OP Nose', 'OP Neck', 'OP RShoulder', 'OP RElbow', 'OP RWrist', + 'OP LShoulder', 'OP LElbow', 'OP LWrist', 'OP MidHip', 'OP RHip', + 'OP RKnee', 'OP RAnkle', 'OP LHip', 'OP LKnee', 'OP LAnkle', 'OP REye', + 'OP LEye', 'OP REar', 'OP LEar', 'OP LBigToe', 'OP LSmallToe', 'OP LHeel', + 'OP RBigToe', 'OP RSmallToe', 'OP RHeel', 'Right Ankle', 'Right Knee', + 'Right Hip', 'Left Hip', 'Left Knee', 'Left Ankle', 'Right Wrist', + 'Right Elbow', 'Right Shoulder', 'Left Shoulder', 'Left Elbow', + 'Left Wrist', 'Neck (LSP)', 'Top of Head (LSP)', 'Pelvis (MPII)', + 'Thorax (MPII)', 'Spine (H36M)', 'Jaw (H36M)', 'Head (H36M)', 'Nose', + 'Left Eye', 'Right Eye', 'Left Ear', 'Right Ear' +] + + +# adapted from VIBE/SPIN to output smpl_joints, vibe joints and action2motion joints +class SMPL(_SMPLLayer): + """ Extension of the official SMPL implementation to support more joints """ + + def __init__(self, smpl_path=SMPL_DATA_PATH, **kwargs): + model_path = os.path.join(smpl_path, "SMPL_NEUTRAL.pkl") + J_path = os.path.join(smpl_path, 'J_regressor_extra.npy') + kwargs["model_path"] = model_path + + # remove the verbosity for the 10-shapes beta parameters + with contextlib.redirect_stdout(None): + super(SMPL, self).__init__(**kwargs) + + J_regressor_extra = np.load(J_path) + self.register_buffer( + 'J_regressor_extra', + torch.tensor(J_regressor_extra, dtype=torch.float32)) + vibe_indexes = np.array([JOINT_MAP[i] for i in JOINT_NAMES]) + a2m_indexes = vibe_indexes[action2motion_joints] + smpl_indexes = np.arange(24) + a2mpl_indexes = np.unique(np.r_[smpl_indexes, a2m_indexes]) + + self.maps = { + "vibe": vibe_indexes, + "a2m": a2m_indexes, + "smpl": smpl_indexes, + "a2mpl": a2mpl_indexes + } + + def forward(self, *args, **kwargs): + 
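+        # Descriptive note: run the standard SMPL layer, then regress the extra
+        # joints (nose, eyes, ears, heels, MPII/H36M landmarks, ...) from the
+        # mesh vertices with J_regressor_extra, and expose every supported
+        # joint set ("vibe", "a2m", "smpl", "a2mpl") in the output dict
+        # alongside the vertices.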
smpl_output = super(SMPL, self).forward(*args, **kwargs) + + extra_joints = vertices2joints(self.J_regressor_extra, + smpl_output.vertices) + all_joints = torch.cat([smpl_output.joints, extra_joints], dim=1) + + output = {"vertices": smpl_output.vertices} + + for joinstype, indexes in self.maps.items(): + output[joinstype] = all_joints[:, indexes] + + return output diff --git a/Evaluator_272/mld/transforms/xyz.py b/Evaluator_272/mld/transforms/xyz.py new file mode 100644 index 0000000000000000000000000000000000000000..f8590ea8f54fbb907cda85a5daa41bd9299ea1db --- /dev/null +++ b/Evaluator_272/mld/transforms/xyz.py @@ -0,0 +1,66 @@ +from typing import Optional +from torch import Tensor + +from .base import Datastruct, dataclass, Transform +from mld.datasets.utils import collate_tensor_with_padding + +from .joints2jfeats import Joints2Jfeats + + +class XYZTransform(Transform): + + def __init__(self, joints2jfeats: Joints2Jfeats, **kwargs): + self.joints2jfeats = joints2jfeats + + def Datastruct(self, **kwargs): + return XYZDatastruct(_joints2jfeats=self.joints2jfeats, + transforms=self, + **kwargs) + + def __repr__(self): + return "XYZTransform()" + + +@dataclass +class XYZDatastruct(Datastruct): + transforms: XYZTransform + _joints2jfeats: Joints2Jfeats + + features: Optional[Tensor] = None + joints_: Optional[Tensor] = None + jfeats_: Optional[Tensor] = None + + def __post_init__(self): + self.datakeys = ["features", "joints_", "jfeats_"] + # starting point + if self.features is not None and self.jfeats_ is None: + self.jfeats_ = self.features + + @property + def joints(self): + # Cached value + if self.joints_ is not None: + return self.joints_ + + # self.jfeats_ should be defined + assert self.jfeats_ is not None + + self._joints2jfeats.to(self.jfeats.device) + self.joints_ = self._joints2jfeats.inverse(self.jfeats) + return self.joints_ + + @property + def jfeats(self): + # Cached value + if self.jfeats_ is not None: + return self.jfeats_ + + # self.joints_ should be defined + assert self.joints_ is not None + + self._joints2jfeats.to(self.joints.device) + self.jfeats_ = self._joints2jfeats(self.joints) + return self.jfeats_ + + def __len__(self): + return len(self.jfeats) diff --git a/Evaluator_272/mld/utils/__init__.py b/Evaluator_272/mld/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Evaluator_272/mld/utils/demo_utils.py b/Evaluator_272/mld/utils/demo_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..1a6299332523f4ed07c72740b4eb433a6550022c --- /dev/null +++ b/Evaluator_272/mld/utils/demo_utils.py @@ -0,0 +1,79 @@ +import os +from pathlib import Path + + +# load example data +def load_example_input(txt_path): + file = open(txt_path, "r") + Lines = file.readlines() + count = 0 + texts, lens = [], [] + # Strips the newline character + for line in Lines: + count += 1 + s = line.strip() + s_l = s.split(" ")[0] + s_t = s[(len(s_l) + 1):] + lens.append(int(s_l)) + texts.append(s_t) + print("Length-{}: {}".format(s_l, s_t)) + return texts, lens + + +# render batch +def render_batch(npy_dir, execute_python="./scripts/visualize_motion.sh", mode="sequence"): + os.system(f"{execute_python} {npy_dir} {mode}") + + +# render +def render(execute_python, npy_path, jointtype, cfg_path): + # execute_python = "/apdcephfs/share_1227775/shingxchen/libs/blender_bpy/blender-2.93.2-linux-x64/blender" + # execute_python = 
"/apdcephfs/share_1227775/mingzhenzhu/jiangbiao/libs/blender-2.93.2-linux-x64/blender" + export_scripts = "render.py" + + os.system( + f"{execute_python} --background --python {export_scripts} -- --cfg={cfg_path} --npy={npy_path} --joint_type={jointtype}" + ) + + fig_path = Path(str(npy_path).replace(".npy", ".png")) + return fig_path + + +# origin render +# def render(npy_path, jointtype): +# execute_python = '/apdcephfs/share_1227775/shingxchen/libs/blender_bpy/blender-2.93.2-linux-x64/blender' +# export_scripts = 'render.py' + +# os.system(f"{execute_python} --background --python {export_scripts} -- npy={npy_path} jointstype={jointtype}") + +# fig_path = Path(str(npy_path).replace(".npy",".png")) +# return fig_path + +# export fbx with hand params from pkl files +# refer to /apdcephfs/share_1227775/shingxchen/AIMotion/TMOST/scripts/fbx_output_smplx.py +def export_fbx_hand(pkl_path): + input = pkl_path + output = pkl_path.replace(".pkl", ".fbx") + + execute_python = "/apdcephfs/share_1227775/shingxchen/libs/blender_bpy/blender-2.93.2-linux-x64/blender" + export_scripts = "./scripts/fbx_output_smplx.py" + os.system( + f"{execute_python} -noaudio --background --python {export_scripts}\ + --input {input} \ + --output {output}" + ) + + +# export fbx without hand params from pkl files +# refer to /apdcephfs/share_1227775/shingxchen/AIMotion/TMOST/scripts/fbx_output.py +def export_fbx(pkl_path): + input = pkl_path + output = pkl_path.replace(".pkl", ".fbx") + + execute_python = "/apdcephfs/share_1227775/shingxchen/libs/blender_bpy/blender-2.93.2-linux-x64/blender" + export_scripts = "./scripts/fbx_output.py" + os.system( + f"{execute_python} -noaudio --background --python {export_scripts}\ + --input {input} \ + --output {output}" + ) diff --git a/Evaluator_272/mld/utils/easyconvert.py b/Evaluator_272/mld/utils/easyconvert.py new file mode 100644 index 0000000000000000000000000000000000000000..ba4061c4904d6d5ee807c85adff6fb721f8ed548 --- /dev/null +++ b/Evaluator_272/mld/utils/easyconvert.py @@ -0,0 +1,73 @@ +import mld.utils.geometry as geometry + + +def nfeats_of(rottype): + if rottype in ["rotvec", "axisangle"]: + return 3 + elif rottype in ["rotquat", "quaternion"]: + return 4 + elif rottype in ["rot6d", "6drot", "rotation6d"]: + return 6 + elif rottype in ["rotmat"]: + return 9 + else: + return TypeError("This rotation type doesn't have features.") + + +def axis_angle_to(newtype, rotations): + if newtype in ["matrix"]: + rotations = geometry.axis_angle_to_matrix(rotations) + return rotations + elif newtype in ["rotmat"]: + rotations = geometry.axis_angle_to_matrix(rotations) + rotations = matrix_to("rotmat", rotations) + return rotations + elif newtype in ["rot6d", "6drot", "rotation6d"]: + rotations = geometry.axis_angle_to_matrix(rotations) + rotations = matrix_to("rot6d", rotations) + return rotations + elif newtype in ["rotquat", "quaternion"]: + rotations = geometry.axis_angle_to_quaternion(rotations) + return rotations + elif newtype in ["rotvec", "axisangle"]: + return rotations + else: + raise NotImplementedError + + +def matrix_to(newtype, rotations): + if newtype in ["matrix"]: + return rotations + if newtype in ["rotmat"]: + rotations = rotations.reshape((*rotations.shape[:-2], 9)) + return rotations + elif newtype in ["rot6d", "6drot", "rotation6d"]: + rotations = geometry.matrix_to_rotation_6d(rotations) + return rotations + elif newtype in ["rotquat", "quaternion"]: + rotations = geometry.matrix_to_quaternion(rotations) + return rotations + elif newtype in ["rotvec", 
"axisangle"]: + rotations = geometry.matrix_to_axis_angle(rotations) + return rotations + else: + raise NotImplementedError + + +def to_matrix(oldtype, rotations): + if oldtype in ["matrix"]: + return rotations + if oldtype in ["rotmat"]: + rotations = rotations.reshape((*rotations.shape[:-2], 3, 3)) + return rotations + elif oldtype in ["rot6d", "6drot", "rotation6d"]: + rotations = geometry.rotation_6d_to_matrix(rotations) + return rotations + elif oldtype in ["rotquat", "quaternion"]: + rotations = geometry.quaternion_to_matrix(rotations) + return rotations + elif oldtype in ["rotvec", "axisangle"]: + rotations = geometry.axis_angle_to_matrix(rotations) + return rotations + else: + raise NotImplementedError diff --git a/Evaluator_272/mld/utils/fixseed.py b/Evaluator_272/mld/utils/fixseed.py new file mode 100644 index 0000000000000000000000000000000000000000..a43a273b138c45dccafef4da3628dd4c2a3f84a4 --- /dev/null +++ b/Evaluator_272/mld/utils/fixseed.py @@ -0,0 +1,18 @@ +import numpy as np +import torch +import random + + +def fixseed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + + +SEED = 10 +EVALSEED = 0 +# Provoc warning: not fully functionnal yet +# torch.set_deterministic(True) +torch.backends.cudnn.benchmark = False + +fixseed(SEED) diff --git a/Evaluator_272/mld/utils/geometry.py b/Evaluator_272/mld/utils/geometry.py new file mode 100644 index 0000000000000000000000000000000000000000..86bf6ae2bcee2580d44281fcae9125f70470e952 --- /dev/null +++ b/Evaluator_272/mld/utils/geometry.py @@ -0,0 +1,473 @@ +# -*- coding: utf-8 -*- + +# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is +# holder of all proprietary rights on this computer program. +# You can only use this computer program if you have closed +# a license agreement with MPG or you get the right to use the computer +# program from someone who is authorized to grant you that right. +# Any use of the computer program without a valid license is prohibited and +# liable to prosecution. +# +# Copyright©2019 Max-Planck-Gesellschaft zur Förderung +# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute +# for Intelligent Systems. All rights reserved. 
+# +# Contact: ps-license@tuebingen.mpg.de + +import torch +import numpy as np +from torch.nn import functional as F + + +def matrix_of_angles(cos, sin, inv=False, dim=2): + assert dim in [2, 3] + sin = -sin if inv else sin + if dim == 2: + row1 = torch.stack((cos, -sin), axis=-1) + row2 = torch.stack((sin, cos), axis=-1) + return torch.stack((row1, row2), axis=-2) + elif dim == 3: + row1 = torch.stack((cos, -sin, 0 * cos), axis=-1) + row2 = torch.stack((sin, cos, 0 * cos), axis=-1) + row3 = torch.stack((0 * sin, 0 * cos, 1 + 0 * cos), axis=-1) + return torch.stack((row1, row2, row3), axis=-2) + + +def matrot2axisangle(matrots): + # This function is borrowed from https://github.com/davrempe/humor/utils/transforms.py + # axisang N x 3 + ''' + :param matrots: N*num_joints*9 + :return: N*num_joints*3 + ''' + import cv2 + batch_size = matrots.shape[0] + matrots = matrots.reshape([batch_size, -1, 9]) + out_axisangle = [] + for mIdx in range(matrots.shape[0]): + cur_axisangle = [] + for jIdx in range(matrots.shape[1]): + a = cv2.Rodrigues(matrots[mIdx, + jIdx:jIdx + 1, :].reshape(3, + 3))[0].reshape( + (1, 3)) + cur_axisangle.append(a) + + out_axisangle.append(np.array(cur_axisangle).reshape([1, -1, 3])) + return np.vstack(out_axisangle) + + +def axisangle2matrots(axisangle): + # This function is borrowed from https://github.com/davrempe/humor/utils/transforms.py + # axisang N x 3 + ''' + :param axisangle: N*num_joints*3 + :return: N*num_joints*9 + ''' + import cv2 + batch_size = axisangle.shape[0] + axisangle = axisangle.reshape([batch_size, -1, 3]) + out_matrot = [] + for mIdx in range(axisangle.shape[0]): + cur_axisangle = [] + for jIdx in range(axisangle.shape[1]): + a = cv2.Rodrigues(axisangle[mIdx, jIdx:jIdx + 1, :].reshape(1, + 3))[0] + cur_axisangle.append(a) + + out_matrot.append(np.array(cur_axisangle).reshape([1, -1, 9])) + return np.vstack(out_matrot) + + +def batch_rodrigues(axisang): + # This function is borrowed from https://github.com/MandyMo/pytorch_HMR/blob/master/src/util.py#L37 + # axisang N x 3 + axisang_norm = torch.norm(axisang + 1e-8, p=2, dim=1) + angle = torch.unsqueeze(axisang_norm, -1) + axisang_normalized = torch.div(axisang, angle) + angle = angle * 0.5 + v_cos = torch.cos(angle) + v_sin = torch.sin(angle) + + quat = torch.cat([v_cos, v_sin * axisang_normalized], dim=1) + rot_mat = quat2mat(quat) + rot_mat = rot_mat.view(rot_mat.shape[0], 9) + return rot_mat + + +def quat2mat(quat): + """ + This function is borrowed from https://github.com/MandyMo/pytorch_HMR/blob/master/src/util.py#L50 + + Convert quaternion coefficients to rotation matrix. 
+ Args: + quat: size = [batch_size, 4] 4 <===>(w, x, y, z) + Returns: + Rotation matrix corresponding to the quaternion -- size = [batch_size, 3, 3] + """ + norm_quat = quat + norm_quat = norm_quat / norm_quat.norm(p=2, dim=1, keepdim=True) + w, x, y, z = norm_quat[:, 0], norm_quat[:, 1], norm_quat[:, + 2], norm_quat[:, + 3] + + batch_size = quat.size(0) + + w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2) + wx, wy, wz = w * x, w * y, w * z + xy, xz, yz = x * y, x * z, y * z + + rotMat = torch.stack([ + w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz, 2 * wz + 2 * xy, + w2 - x2 + y2 - z2, 2 * yz - 2 * wx, 2 * xz - 2 * wy, 2 * wx + 2 * yz, + w2 - x2 - y2 + z2 + ], + dim=1).view(batch_size, 3, 3) + return rotMat + + +def rotation_matrix_to_angle_axis(rotation_matrix): + """ + This function is borrowed from https://github.com/kornia/kornia + + Convert 3x4 rotation matrix to Rodrigues vector + + Args: + rotation_matrix (Tensor): rotation matrix. + + Returns: + Tensor: Rodrigues vector transformation. + + Shape: + - Input: :math:`(N, 3, 4)` + - Output: :math:`(N, 3)` + + Example: + >>> input = torch.rand(2, 3, 4) # Nx4x4 + >>> output = tgm.rotation_matrix_to_angle_axis(input) # Nx3 + """ + if rotation_matrix.shape[1:] == (3, 3): + rot_mat = rotation_matrix.reshape(-1, 3, 3) + hom = torch.tensor([0, 0, 1], + dtype=torch.float32, + device=rotation_matrix.device).reshape( + 1, 3, 1).expand(rot_mat.shape[0], -1, -1) + rotation_matrix = torch.cat([rot_mat, hom], dim=-1) + + quaternion = rotation_matrix_to_quaternion(rotation_matrix) + aa = quaternion_to_angle_axis(quaternion) + aa[torch.isnan(aa)] = 0.0 + return aa + + +def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor: + """ + This function is borrowed from https://github.com/kornia/kornia + + Convert quaternion vector to angle axis of rotation. + + Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h + + Args: + quaternion (torch.Tensor): tensor with quaternions. + + Return: + torch.Tensor: tensor with angle axis of rotation. + + Shape: + - Input: :math:`(*, 4)` where `*` means, any number of dimensions + - Output: :math:`(*, 3)` + + Example: + >>> quaternion = torch.rand(2, 4) # Nx4 + >>> angle_axis = tgm.quaternion_to_angle_axis(quaternion) # Nx3 + """ + if not torch.is_tensor(quaternion): + raise TypeError("Input type is not a torch.Tensor. Got {}".format( + type(quaternion))) + + if not quaternion.shape[-1] == 4: + raise ValueError( + "Input must be a tensor of shape Nx4 or 4. 
Got {}".format( + quaternion.shape)) + # unpack input and compute conversion + q1: torch.Tensor = quaternion[..., 1] + q2: torch.Tensor = quaternion[..., 2] + q3: torch.Tensor = quaternion[..., 3] + sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3 + + sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta) + cos_theta: torch.Tensor = quaternion[..., 0] + two_theta: torch.Tensor = 2.0 * torch.where( + cos_theta < 0.0, torch.atan2(-sin_theta, -cos_theta), + torch.atan2(sin_theta, cos_theta)) + + k_pos: torch.Tensor = two_theta / sin_theta + k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta) + k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg) + + angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3] + angle_axis[..., 0] += q1 * k + angle_axis[..., 1] += q2 * k + angle_axis[..., 2] += q3 * k + return angle_axis + + +def rotation_matrix_to_quaternion(rotation_matrix, eps=1e-6): + """ + This function is borrowed from https://github.com/kornia/kornia + + Convert 3x4 rotation matrix to 4d quaternion vector + + This algorithm is based on algorithm described in + https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py#L201 + + Args: + rotation_matrix (Tensor): the rotation matrix to convert. + + Return: + Tensor: the rotation in quaternion + + Shape: + - Input: :math:`(N, 3, 4)` + - Output: :math:`(N, 4)` + + Example: + >>> input = torch.rand(4, 3, 4) # Nx3x4 + >>> output = tgm.rotation_matrix_to_quaternion(input) # Nx4 + """ + if not torch.is_tensor(rotation_matrix): + raise TypeError("Input type is not a torch.Tensor. Got {}".format( + type(rotation_matrix))) + + if len(rotation_matrix.shape) > 3: + raise ValueError( + "Input size must be a three dimensional tensor. Got {}".format( + rotation_matrix.shape)) + if not rotation_matrix.shape[-2:] == (3, 4): + raise ValueError( + "Input size must be a N x 3 x 4 tensor. 
Got {}".format( + rotation_matrix.shape)) + + rmat_t = torch.transpose(rotation_matrix, 1, 2) + + mask_d2 = rmat_t[:, 2, 2] < eps + + mask_d0_d1 = rmat_t[:, 0, 0] > rmat_t[:, 1, 1] + mask_d0_nd1 = rmat_t[:, 0, 0] < -rmat_t[:, 1, 1] + + t0 = 1 + rmat_t[:, 0, 0] - rmat_t[:, 1, 1] - rmat_t[:, 2, 2] + q0 = torch.stack([ + rmat_t[:, 1, 2] - rmat_t[:, 2, 1], t0, + rmat_t[:, 0, 1] + rmat_t[:, 1, 0], rmat_t[:, 2, 0] + rmat_t[:, 0, 2] + ], -1) + t0_rep = t0.repeat(4, 1).t() + + t1 = 1 - rmat_t[:, 0, 0] + rmat_t[:, 1, 1] - rmat_t[:, 2, 2] + q1 = torch.stack([ + rmat_t[:, 2, 0] - rmat_t[:, 0, 2], rmat_t[:, 0, 1] + rmat_t[:, 1, 0], + t1, rmat_t[:, 1, 2] + rmat_t[:, 2, 1] + ], -1) + t1_rep = t1.repeat(4, 1).t() + + t2 = 1 - rmat_t[:, 0, 0] - rmat_t[:, 1, 1] + rmat_t[:, 2, 2] + q2 = torch.stack([ + rmat_t[:, 0, 1] - rmat_t[:, 1, 0], rmat_t[:, 2, 0] + rmat_t[:, 0, 2], + rmat_t[:, 1, 2] + rmat_t[:, 2, 1], t2 + ], -1) + t2_rep = t2.repeat(4, 1).t() + + t3 = 1 + rmat_t[:, 0, 0] + rmat_t[:, 1, 1] + rmat_t[:, 2, 2] + q3 = torch.stack([ + t3, rmat_t[:, 1, 2] - rmat_t[:, 2, 1], + rmat_t[:, 2, 0] - rmat_t[:, 0, 2], rmat_t[:, 0, 1] - rmat_t[:, 1, 0] + ], -1) + t3_rep = t3.repeat(4, 1).t() + + mask_c0 = mask_d2 * mask_d0_d1 + mask_c1 = mask_d2 * ~mask_d0_d1 + mask_c2 = ~mask_d2 * mask_d0_nd1 + mask_c3 = ~mask_d2 * ~mask_d0_nd1 + mask_c0 = mask_c0.view(-1, 1).type_as(q0) + mask_c1 = mask_c1.view(-1, 1).type_as(q1) + mask_c2 = mask_c2.view(-1, 1).type_as(q2) + mask_c3 = mask_c3.view(-1, 1).type_as(q3) + + q = q0 * mask_c0 + q1 * mask_c1 + q2 * mask_c2 + q3 * mask_c3 + q /= torch.sqrt(t0_rep * mask_c0 + t1_rep * mask_c1 + # noqa + t2_rep * mask_c2 + t3_rep * mask_c3) # noqa + q *= 0.5 + return q + + +def estimate_translation_np(S, + joints_2d, + joints_conf, + focal_length=5000., + img_size=224.): + """ + This function is borrowed from https://github.com/nkolot/SPIN/utils/geometry.py + + Find camera translation that brings 3D joints S closest to 2D the corresponding joints_2d. + Input: + S: (25, 3) 3D joint locations + joints: (25, 3) 2D joint locations and confidence + Returns: + (3,) camera translation vector + """ + + num_joints = S.shape[0] + # focal length + f = np.array([focal_length, focal_length]) + # optical center + center = np.array([img_size / 2., img_size / 2.]) + + # transformations + Z = np.reshape(np.tile(S[:, 2], (2, 1)).T, -1) + XY = np.reshape(S[:, 0:2], -1) + O = np.tile(center, num_joints) + F = np.tile(f, num_joints) + weight2 = np.reshape(np.tile(np.sqrt(joints_conf), (2, 1)).T, -1) + + # least squares + Q = np.array([ + F * np.tile(np.array([1, 0]), num_joints), + F * np.tile(np.array([0, 1]), num_joints), + O - np.reshape(joints_2d, -1) + ]).T + c = (np.reshape(joints_2d, -1) - O) * Z - F * XY + + # weighted least squares + W = np.diagflat(weight2) + Q = np.dot(W, Q) + c = np.dot(W, c) + + # square matrix + A = np.dot(Q.T, Q) + b = np.dot(Q.T, c) + + # solution + trans = np.linalg.solve(A, b) + + return trans + + +def estimate_translation(S, joints_2d, focal_length=5000., img_size=224.): + """ + This function is borrowed from https://github.com/nkolot/SPIN/utils/geometry.py + + Find camera translation that brings 3D joints S closest to 2D the corresponding joints_2d. 
+    Input:
+        S: (B, 49, 3) 3D joint locations
+        joints: (B, 49, 3) 2D joint locations and confidence
+    Returns:
+        (B, 3) camera translation vectors
+    """
+
+    device = S.device
+    # Use only joints 25:49 (GT joints)
+    S = S[:, 25:, :].cpu().numpy()
+    joints_2d = joints_2d[:, 25:, :].cpu().numpy()
+    joints_conf = joints_2d[:, :, -1]
+    joints_2d = joints_2d[:, :, :-1]
+    trans = np.zeros((S.shape[0], 3), dtype=np.float32)
+    # Find the translation for each example in the batch
+    for i in range(S.shape[0]):
+        S_i = S[i]
+        joints_i = joints_2d[i]
+        conf_i = joints_conf[i]
+        trans[i] = estimate_translation_np(S_i,
+                                           joints_i,
+                                           conf_i,
+                                           focal_length=focal_length,
+                                           img_size=img_size)
+    return torch.from_numpy(trans).to(device)
+
+
+def rot6d_to_rotmat_spin(x):
+    """Convert 6D rotation representation to 3x3 rotation matrix.
+    Based on Zhou et al., "On the Continuity of Rotation Representations in Neural Networks", CVPR 2019
+    Input:
+        (B,6) Batch of 6-D rotation representations
+    Output:
+        (B,3,3) Batch of corresponding rotation matrices
+    """
+    x = x.view(-1, 3, 2)
+    a1 = x[:, :, 0]
+    a2 = x[:, :, 1]
+    b1 = F.normalize(a1)
+    b2 = F.normalize(a2 - torch.einsum('bi,bi->b', b1, a2).unsqueeze(-1) * b1)
+
+    # inp = a2 - torch.einsum('bi,bi->b', b1, a2).unsqueeze(-1) * b1
+    # denom = inp.pow(2).sum(dim=1).sqrt().unsqueeze(-1) + 1e-8
+    # b2 = inp / denom
+
+    b3 = torch.cross(b1, b2, dim=1)
+    return torch.stack((b1, b2, b3), dim=-1)
+
+
+def rot6d_to_rotmat(x):
+    x = x.view(-1, 3, 2)
+
+    # Normalize the first vector
+    b1 = F.normalize(x[:, :, 0], dim=1, eps=1e-6)
+
+    dot_prod = torch.sum(b1 * x[:, :, 1], dim=1, keepdim=True)
+    # Compute the second vector by finding the orthogonal complement to it
+    b2 = F.normalize(x[:, :, 1] - dot_prod * b1, dim=-1, eps=1e-6)
+
+    # Finish building the basis by taking the cross product
+    b3 = torch.cross(b1, b2, dim=1)
+    rot_mats = torch.stack([b1, b2, b3], dim=-1)
+
+    return rot_mats
+
+
+import mld.utils.rotation_conversions as rotation_conversions
+
+
+def rot6d(x_rotations, pose_rep):
+    time, njoints, feats = x_rotations.shape
+
+    # Compute rotations (convert only masked sequences output)
+    if pose_rep == "rotvec":
+        rotations = rotation_conversions.axis_angle_to_matrix(x_rotations)
+    elif pose_rep == "rotmat":
+        # keep the time dimension: (time, njoints, 9) -> (time, njoints, 3, 3)
+        rotations = x_rotations.view(time, njoints, 3, 3)
+    elif pose_rep == "rotquat":
+        rotations = rotation_conversions.quaternion_to_matrix(x_rotations)
+    elif pose_rep == "rot6d":
+        rotations = rotation_conversions.rotation_6d_to_matrix(x_rotations)
+    else:
+        raise NotImplementedError("No geometry for this one.")
+
+    rotations_6d = rotation_conversions.matrix_to_rotation_6d(rotations)
+    return rotations_6d
+
+
+def rot6d_batch(x_rotations, pose_rep):
+    nsamples, time, njoints, feats = x_rotations.shape
+
+    # Compute rotations (convert only masked sequences output)
+    if pose_rep == "rotvec":
+        rotations = rotation_conversions.axis_angle_to_matrix(x_rotations)
+    elif pose_rep == "rotmat":
+        rotations = x_rotations.view(-1, njoints, 3, 3)
+    elif pose_rep == "rotquat":
+        rotations = rotation_conversions.quaternion_to_matrix(x_rotations)
+    elif pose_rep == "rot6d":
+        rotations = rotation_conversions.rotation_6d_to_matrix(x_rotations)
+    else:
+        raise NotImplementedError("No geometry for this one.")
+
+    rotations_6d = rotation_conversions.matrix_to_rotation_6d(rotations)
+    return rotations_6d
+
+
+def rot6d_to_rotvec_batch(pose):
+    # nsamples, time, njoints, feats = rot6d.shape
+    bs, nfeats = pose.shape
+    rot6d = pose.reshape(bs, 24, 6)
+    rotations = 
rotation_conversions.rotation_6d_to_matrix(rot6d) + rotvec = rotation_conversions.matrix_to_axis_angle(rotations) + return rotvec.reshape(bs, 24 * 3) diff --git a/Evaluator_272/mld/utils/joints.py b/Evaluator_272/mld/utils/joints.py new file mode 100644 index 0000000000000000000000000000000000000000..ffbddbb79289fa1c71d007a4c37e2bba34e847e0 --- /dev/null +++ b/Evaluator_272/mld/utils/joints.py @@ -0,0 +1,291 @@ +mmm_joints = [ + "root", + "BP", + "BT", + "BLN", + "BUN", + "LS", + "LE", + "LW", + "RS", + "RE", + "RW", + "LH", + "LK", + "LA", + "LMrot", + "LF", + "RH", + "RK", + "RA", + "RMrot", + "RF", +] + +humanml3d_joints = [ + "root", + "RH", + "LH", + "BP", + "RK", + "LK", + "BT", + "RMrot", + "LMrot", + "BLN", + "RF", + "LF", + "BMN", + "RSI", + "LSI", + "BUN", + "RS", + "LS", + "RE", + "LE", + "RW", + "LW", +] + + +motionx_joints = [ + "root", + "RH", + "LH", + "BP", + "RK", + "LK", + "BT", + "RMrot", + "LMrot", + "BLN", + "RF", + "LF", + "BMN", + "RSI", + "LSI", + "BUN", + "RS", + "LS", + "RE", + "LE", + "RW", + "LW", +] + +smplh_joints = [ + "pelvis", + "left_hip", + "right_hip", + "spine1", + "left_knee", + "right_knee", + "spine2", + "left_ankle", + "right_ankle", + "spine3", + "left_foot", + "right_foot", + "neck", + "left_collar", + "right_collar", + "head", + "left_shoulder", + "right_shoulder", + "left_elbow", + "right_elbow", + "left_wrist", + "right_wrist", + "left_index1", + "left_index2", + "left_index3", + "left_middle1", + "left_middle2", + "left_middle3", + "left_pinky1", + "left_pinky2", + "left_pinky3", + "left_ring1", + "left_ring2", + "left_ring3", + "left_thumb1", + "left_thumb2", + "left_thumb3", + "right_index1", + "right_index2", + "right_index3", + "right_middle1", + "right_middle2", + "right_middle3", + "right_pinky1", + "right_pinky2", + "right_pinky3", + "right_ring1", + "right_ring2", + "right_ring3", + "right_thumb1", + "right_thumb2", + "right_thumb3", + "nose", + "right_eye", + "left_eye", + "right_ear", + "left_ear", + "left_big_toe", + "left_small_toe", + "left_heel", + "right_big_toe", + "right_small_toe", + "right_heel", + "left_thumb", + "left_index", + "left_middle", + "left_ring", + "left_pinky", + "right_thumb", + "right_index", + "right_middle", + "right_ring", + "right_pinky", +] + +smplnh_joints = [ + "pelvis", + "left_hip", + "right_hip", + "spine1", + "left_knee", + "right_knee", + "spine2", + "left_ankle", + "right_ankle", + "spine3", + "left_foot", + "right_foot", + "neck", + "left_collar", + "right_collar", + "head", + "left_shoulder", + "right_shoulder", + "left_elbow", + "right_elbow", + "left_wrist", + "right_wrist", +] + + +mmm2smplh_correspondence = { + "root": "pelvis", + "BP": "spine1", + "BT": "spine3", + "BLN": "neck", + "BUN": "head", + "LS": "left_shoulder", + "LE": "left_elbow", + "LW": "left_wrist", + "RS": "right_shoulder", + "RE": "right_elbow", + "RW": "right_wrist", + "LH": "left_hip", + "LK": "left_knee", + "LA": "left_ankle", + "LMrot": "left_heel", + "LF": "left_foot", + "RH": "right_hip", + "RK": "right_knee", + "RA": "right_ankle", + "RMrot": "right_heel", + "RF": "right_foot", +} + +smplh2mmm_correspondence = {val: key for key, val in mmm2smplh_correspondence.items()} +smplh2mmm_indexes = [ + smplh_joints.index(mmm2smplh_correspondence[x]) for x in mmm_joints +] + +smplnh2smplh_correspondence = {key: key for key in smplnh_joints} +smplh2smplnh_correspondence = { + val: key for key, val in smplnh2smplh_correspondence.items() +} + +smplh2smplnh_indexes = [ + smplh_joints.index(smplnh2smplh_correspondence[x]) for x in 
smplnh_joints +] + + +mmm_kinematic_tree = [ + [0, 1, 2, 3, 4], # body + [3, 5, 6, 7], # right arm + [3, 8, 9, 10], # left arm + [0, 11, 12, 13, 14, 15], # right leg + [0, 16, 17, 18, 19, 20], +] # left leg + +humanml3d_kinematic_tree = [ + [0, 3, 6, 9, 12, 15], # body + [9, 14, 17, 19, 21], # right arm + [9, 13, 16, 18, 20], # left arm + [0, 2, 5, 8, 11], # right leg + [0, 1, 4, 7, 10], +] # left leg + +smplh_to_mmm_scaling_factor = 480 / 0.75 +mmm_to_smplh_scaling_factor = 0.75 / 480 + +mmm_joints_info = { + "root": mmm_joints.index("root"), + "feet": [ + mmm_joints.index("LMrot"), + mmm_joints.index("RMrot"), + mmm_joints.index("LF"), + mmm_joints.index("RF"), + ], + "shoulders": [mmm_joints.index("LS"), mmm_joints.index("RS")], + "hips": [mmm_joints.index("LH"), mmm_joints.index("RH")], +} + +smplnh_joints_info = { + "root": smplnh_joints.index("pelvis"), + "feet": [ + smplnh_joints.index("left_ankle"), + smplnh_joints.index("right_ankle"), + smplnh_joints.index("left_foot"), + smplnh_joints.index("right_foot"), + ], + "shoulders": [ + smplnh_joints.index("left_shoulder"), + smplnh_joints.index("right_shoulder"), + ], + "hips": [smplnh_joints.index("left_hip"), smplnh_joints.index("right_hip")], +} + + +infos = {"mmm": mmm_joints_info, "smplnh": smplnh_joints_info} + +smplh_indexes = {"mmm": smplh2mmm_indexes, "smplnh": smplh2smplnh_indexes} + + +root_joints = { + "mmm": mmm_joints_info["root"], + "mmmns": mmm_joints_info["root"], + "smplmmm": mmm_joints_info["root"], + "smplnh": smplnh_joints_info["root"], + "smplh": smplh_joints.index("pelvis"), +} + + +def get_root_idx(joinstype): + return root_joints[joinstype] + + +# def mmm2smpl(joints_mmm): +# mmm2smplnh_indexes = [] +# for x in smplnh_joints: +# if x in smplh2mmm_correspondence: +# mmm2smplnh_indexes.append(mmm_joints.index(smplh2mmm_correspondence[x])) + +# spine2 = 0.5*(joints[mmm_joints.index("spine1")] + joints[mmm_joints.index("spine3")]) + +# joints = joints_mmm[indexes] +# return joints diff --git a/Evaluator_272/mld/utils/logger.py b/Evaluator_272/mld/utils/logger.py new file mode 100644 index 0000000000000000000000000000000000000000..a9eacb06597aafa9973ff63fb75309327622709a --- /dev/null +++ b/Evaluator_272/mld/utils/logger.py @@ -0,0 +1,71 @@ +from pathlib import Path +import os +import time +import logging +from omegaconf import OmegaConf +from pytorch_lightning.utilities.rank_zero import rank_zero_only + + +def create_logger(cfg, phase='train'): + # root dir set by cfg + root_output_dir = Path(cfg.FOLDER) + # set up logger + if not root_output_dir.exists(): + print('=> creating {}'.format(root_output_dir)) + root_output_dir.mkdir() + + cfg_name = cfg.NAME + model = cfg.model.model_type + cfg_name = os.path.basename(cfg_name).split('.')[0] + + final_output_dir = root_output_dir / model / cfg_name + cfg.FOLDER_EXP = str(final_output_dir) + + time_str = time.strftime('%Y-%m-%d-%H-%M-%S') + + new_dir(cfg, phase, time_str, final_output_dir) + + head = '%(asctime)-15s %(message)s' + logger = config_logger(final_output_dir, time_str, phase, head) + if logger is None: + logger = logging.getLogger() + logger.setLevel(logging.CRITICAL) + logging.basicConfig(format=head) + return logger + + +@rank_zero_only +def config_logger(final_output_dir, time_str, phase, head): + log_file = '{}_{}_{}.log'.format('log', time_str, phase) + final_log_file = final_output_dir / log_file + logging.basicConfig(filename=str(final_log_file)) + logger = logging.getLogger() + logger.setLevel(logging.INFO) + console = logging.StreamHandler() + 
formatter = logging.Formatter(head) + console.setFormatter(formatter) + logging.getLogger('').addHandler(console) + file_handler = logging.FileHandler(final_log_file, 'w') + file_handler.setFormatter(logging.Formatter(head)) + file_handler.setLevel(logging.INFO) + logging.getLogger('').addHandler(file_handler) + return logger + + +@rank_zero_only +def new_dir(cfg, phase, time_str, final_output_dir): + # new experiment folder + cfg.TIME = str(time_str) + if os.path.exists( + final_output_dir) and cfg.TRAIN.RESUME is None and not cfg.DEBUG: + file_list = sorted(os.listdir(final_output_dir), reverse=True) + for item in file_list: + if item.endswith('.log'): + os.rename(str(final_output_dir), + str(final_output_dir) + '_' + cfg.TIME) + break + final_output_dir.mkdir(parents=True, exist_ok=True) + # write config yaml + config_file = '{}_{}_{}.yaml'.format('config', time_str, phase) + final_config_file = final_output_dir / config_file + OmegaConf.save(config=cfg, f=final_config_file) diff --git a/Evaluator_272/mld/utils/misc.py b/Evaluator_272/mld/utils/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..4f2a68d68019098e66905e0e21cb96678031bab0 --- /dev/null +++ b/Evaluator_272/mld/utils/misc.py @@ -0,0 +1,29 @@ +import torch + + +def to_numpy(tensor): + if torch.is_tensor(tensor): + return tensor.cpu().numpy() + elif type(tensor).__module__ != 'numpy': + raise ValueError("Cannot convert {} to numpy array".format( + type(tensor))) + return tensor + + +def to_torch(ndarray): + if type(ndarray).__module__ == 'numpy': + return torch.from_numpy(ndarray) + elif not torch.is_tensor(ndarray): + raise ValueError("Cannot convert {} to torch tensor".format( + type(ndarray))) + return ndarray + + +def cleanexit(): + import sys + import os + try: + sys.exit(0) + except SystemExit: + os._exit(0) + diff --git a/Evaluator_272/mld/utils/rotation_conversions.py b/Evaluator_272/mld/utils/rotation_conversions.py new file mode 100644 index 0000000000000000000000000000000000000000..770c3bf36f05fcaf89cbb03e17035357f3c0a4df --- /dev/null +++ b/Evaluator_272/mld/utils/rotation_conversions.py @@ -0,0 +1,551 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# Check PYTORCH3D_LICENCE before use + +import functools +from typing import Optional + +import torch +import torch.nn.functional as F + + +""" +The transformation matrices returned from the functions in this file assume +the points on which the transformation will be applied are column vectors. +i.e. the R matrix is structured as + + R = [ + [Rxx, Rxy, Rxz], + [Ryx, Ryy, Ryz], + [Rzx, Rzy, Rzz], + ] # (3, 3) + +This matrix can be applied to column vectors by post multiplication +by the points e.g. + + points = [[0], [1], [2]] # (3 x 1) xyz coordinates of a point + transformed_points = R * points + +To apply the same matrix to points which are row vectors, the R matrix +can be transposed and pre multiplied by the points: + +e.g. + points = [[0, 1, 2]] # (1 x 3) xyz coordinates of a point + transformed_points = points * R.transpose(1, 0) +""" + + +def quaternion_to_matrix(quaternions): + """ + Convert rotations given as quaternions to rotation matrices. + + Args: + quaternions: quaternions with real part first, + as tensor of shape (..., 4). + + Returns: + Rotation matrices as tensor of shape (..., 3, 3). 
+ """ + r, i, j, k = torch.unbind(quaternions, -1) + two_s = 2.0 / (quaternions * quaternions).sum(-1) + + o = torch.stack( + ( + 1 - two_s * (j * j + k * k), + two_s * (i * j - k * r), + two_s * (i * k + j * r), + two_s * (i * j + k * r), + 1 - two_s * (i * i + k * k), + two_s * (j * k - i * r), + two_s * (i * k - j * r), + two_s * (j * k + i * r), + 1 - two_s * (i * i + j * j), + ), + -1, + ) + return o.reshape(quaternions.shape[:-1] + (3, 3)) + + +def _copysign(a, b): + """ + Return a tensor where each element has the absolute value taken from the, + corresponding element of a, with sign taken from the corresponding + element of b. This is like the standard copysign floating-point operation, + but is not careful about negative 0 and NaN. + + Args: + a: source tensor. + b: tensor whose signs will be used, of the same shape as a. + + Returns: + Tensor of the same shape as a with the signs of b. + """ + signs_differ = (a < 0) != (b < 0) + return torch.where(signs_differ, -a, a) + + +def _sqrt_positive_part(x): + """ + Returns torch.sqrt(torch.max(0, x)) + but with a zero subgradient where x is 0. + """ + ret = torch.zeros_like(x) + positive_mask = x > 0 + ret[positive_mask] = torch.sqrt(x[positive_mask]) + return ret + + +def matrix_to_quaternion(matrix): + """ + Convert rotations given as rotation matrices to quaternions. + + Args: + matrix: Rotation matrices as tensor of shape (..., 3, 3). + + Returns: + quaternions with real part first, as tensor of shape (..., 4). + """ + if matrix.size(-1) != 3 or matrix.size(-2) != 3: + raise ValueError(f"Invalid rotation matrix shape f{matrix.shape}.") + m00 = matrix[..., 0, 0] + m11 = matrix[..., 1, 1] + m22 = matrix[..., 2, 2] + o0 = 0.5 * _sqrt_positive_part(1 + m00 + m11 + m22) + x = 0.5 * _sqrt_positive_part(1 + m00 - m11 - m22) + y = 0.5 * _sqrt_positive_part(1 - m00 + m11 - m22) + z = 0.5 * _sqrt_positive_part(1 - m00 - m11 + m22) + o1 = _copysign(x, matrix[..., 2, 1] - matrix[..., 1, 2]) + o2 = _copysign(y, matrix[..., 0, 2] - matrix[..., 2, 0]) + o3 = _copysign(z, matrix[..., 1, 0] - matrix[..., 0, 1]) + return torch.stack((o0, o1, o2, o3), -1) + + +def _axis_angle_rotation(axis: str, angle): + """ + Return the rotation matrices for one of the rotations about an axis + of which Euler angles describe, for each value of the angle given. + + Args: + axis: Axis label "X" or "Y or "Z". + angle: any shape tensor of Euler angles in radians + + Returns: + Rotation matrices as tensor of shape (..., 3, 3). + """ + + cos = torch.cos(angle) + sin = torch.sin(angle) + one = torch.ones_like(angle) + zero = torch.zeros_like(angle) + + if axis == "X": + R_flat = (one, zero, zero, zero, cos, -sin, zero, sin, cos) + if axis == "Y": + R_flat = (cos, zero, sin, zero, one, zero, -sin, zero, cos) + if axis == "Z": + R_flat = (cos, -sin, zero, sin, cos, zero, zero, zero, one) + + return torch.stack(R_flat, -1).reshape(angle.shape + (3, 3)) + + +def euler_angles_to_matrix(euler_angles, convention: str): + """ + Convert rotations given as Euler angles in radians to rotation matrices. + + Args: + euler_angles: Euler angles in radians as tensor of shape (..., 3). + convention: Convention string of three uppercase letters from + {"X", "Y", and "Z"}. + + Returns: + Rotation matrices as tensor of shape (..., 3, 3). 
+ """ + if euler_angles.dim() == 0 or euler_angles.shape[-1] != 3: + raise ValueError("Invalid input euler angles.") + if len(convention) != 3: + raise ValueError("Convention must have 3 letters.") + if convention[1] in (convention[0], convention[2]): + raise ValueError(f"Invalid convention {convention}.") + for letter in convention: + if letter not in ("X", "Y", "Z"): + raise ValueError(f"Invalid letter {letter} in convention string.") + matrices = map(_axis_angle_rotation, convention, torch.unbind(euler_angles, -1)) + return functools.reduce(torch.matmul, matrices) + + +def _angle_from_tan( + axis: str, other_axis: str, data, horizontal: bool, tait_bryan: bool +): + """ + Extract the first or third Euler angle from the two members of + the matrix which are positive constant times its sine and cosine. + + Args: + axis: Axis label "X" or "Y or "Z" for the angle we are finding. + other_axis: Axis label "X" or "Y or "Z" for the middle axis in the + convention. + data: Rotation matrices as tensor of shape (..., 3, 3). + horizontal: Whether we are looking for the angle for the third axis, + which means the relevant entries are in the same row of the + rotation matrix. If not, they are in the same column. + tait_bryan: Whether the first and third axes in the convention differ. + + Returns: + Euler Angles in radians for each matrix in data as a tensor + of shape (...). + """ + + i1, i2 = {"X": (2, 1), "Y": (0, 2), "Z": (1, 0)}[axis] + if horizontal: + i2, i1 = i1, i2 + even = (axis + other_axis) in ["XY", "YZ", "ZX"] + if horizontal == even: + return torch.atan2(data[..., i1], data[..., i2]) + if tait_bryan: + return torch.atan2(-data[..., i2], data[..., i1]) + return torch.atan2(data[..., i2], -data[..., i1]) + + +def _index_from_letter(letter: str): + if letter == "X": + return 0 + if letter == "Y": + return 1 + if letter == "Z": + return 2 + + +def matrix_to_euler_angles(matrix, convention: str): + """ + Convert rotations given as rotation matrices to Euler angles in radians. + + Args: + matrix: Rotation matrices as tensor of shape (..., 3, 3). + convention: Convention string of three uppercase letters. + + Returns: + Euler angles in radians as tensor of shape (..., 3). + """ + if len(convention) != 3: + raise ValueError("Convention must have 3 letters.") + if convention[1] in (convention[0], convention[2]): + raise ValueError(f"Invalid convention {convention}.") + for letter in convention: + if letter not in ("X", "Y", "Z"): + raise ValueError(f"Invalid letter {letter} in convention string.") + if matrix.size(-1) != 3 or matrix.size(-2) != 3: + raise ValueError(f"Invalid rotation matrix shape f{matrix.shape}.") + i0 = _index_from_letter(convention[0]) + i2 = _index_from_letter(convention[2]) + tait_bryan = i0 != i2 + if tait_bryan: + central_angle = torch.asin( + matrix[..., i0, i2] * (-1.0 if i0 - i2 in [-1, 2] else 1.0) + ) + else: + central_angle = torch.acos(matrix[..., i0, i0]) + + o = ( + _angle_from_tan( + convention[0], convention[1], matrix[..., i2], False, tait_bryan + ), + central_angle, + _angle_from_tan( + convention[2], convention[1], matrix[..., i0, :], True, tait_bryan + ), + ) + return torch.stack(o, -1) + + +def random_quaternions( + n: int, dtype: Optional[torch.dtype] = None, device=None, requires_grad=False +): + """ + Generate random quaternions representing rotations, + i.e. versors with nonnegative real part. + + Args: + n: Number of quaternions in a batch to return. + dtype: Type to return. + device: Desired device of returned tensor. 
Default: + uses the current device for the default tensor type. + requires_grad: Whether the resulting tensor should have the gradient + flag set. + + Returns: + Quaternions as tensor of shape (N, 4). + """ + o = torch.randn((n, 4), dtype=dtype, device=device, requires_grad=requires_grad) + s = (o * o).sum(1) + o = o / _copysign(torch.sqrt(s), o[:, 0])[:, None] + return o + + +def random_rotations( + n: int, dtype: Optional[torch.dtype] = None, device=None, requires_grad=False +): + """ + Generate random rotations as 3x3 rotation matrices. + + Args: + n: Number of rotation matrices in a batch to return. + dtype: Type to return. + device: Device of returned tensor. Default: if None, + uses the current device for the default tensor type. + requires_grad: Whether the resulting tensor should have the gradient + flag set. + + Returns: + Rotation matrices as tensor of shape (n, 3, 3). + """ + quaternions = random_quaternions( + n, dtype=dtype, device=device, requires_grad=requires_grad + ) + return quaternion_to_matrix(quaternions) + + +def random_rotation( + dtype: Optional[torch.dtype] = None, device=None, requires_grad=False +): + """ + Generate a single random 3x3 rotation matrix. + + Args: + dtype: Type to return + device: Device of returned tensor. Default: if None, + uses the current device for the default tensor type + requires_grad: Whether the resulting tensor should have the gradient + flag set + + Returns: + Rotation matrix as tensor of shape (3, 3). + """ + return random_rotations(1, dtype, device, requires_grad)[0] + + +def standardize_quaternion(quaternions): + """ + Convert a unit quaternion to a standard form: one in which the real + part is non negative. + + Args: + quaternions: Quaternions with real part first, + as tensor of shape (..., 4). + + Returns: + Standardized quaternions as tensor of shape (..., 4). + """ + return torch.where(quaternions[..., 0:1] < 0, -quaternions, quaternions) + + +def quaternion_raw_multiply(a, b): + """ + Multiply two quaternions. + Usual torch rules for broadcasting apply. + + Args: + a: Quaternions as tensor of shape (..., 4), real part first. + b: Quaternions as tensor of shape (..., 4), real part first. + + Returns: + The product of a and b, a tensor of quaternions shape (..., 4). + """ + aw, ax, ay, az = torch.unbind(a, -1) + bw, bx, by, bz = torch.unbind(b, -1) + ow = aw * bw - ax * bx - ay * by - az * bz + ox = aw * bx + ax * bw + ay * bz - az * by + oy = aw * by - ax * bz + ay * bw + az * bx + oz = aw * bz + ax * by - ay * bx + az * bw + return torch.stack((ow, ox, oy, oz), -1) + + +def quaternion_multiply(a, b): + """ + Multiply two quaternions representing rotations, returning the quaternion + representing their composition, i.e. the versor with nonnegative real part. + Usual torch rules for broadcasting apply. + + Args: + a: Quaternions as tensor of shape (..., 4), real part first. + b: Quaternions as tensor of shape (..., 4), real part first. + + Returns: + The product of a and b, a tensor of quaternions of shape (..., 4). + """ + ab = quaternion_raw_multiply(a, b) + return standardize_quaternion(ab) + + +def quaternion_invert(quaternion): + """ + Given a quaternion representing rotation, get the quaternion representing + its inverse. + + Args: + quaternion: Quaternions as tensor of shape (..., 4), with real part + first, which must be versors (unit quaternions). + + Returns: + The inverse, a tensor of quaternions of shape (..., 4). 
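+
+    Example:
+        >>> q = random_quaternions(4)  # (N, 4) versors
+        >>> p = quaternion_apply(q, torch.randn(4, 3))  # rotated points, (N, 3)
+        >>> quaternion_apply(quaternion_invert(q), p)  # recovers the original points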
+ """ + + return quaternion * quaternion.new_tensor([1, -1, -1, -1]) + + +def quaternion_apply(quaternion, point): + """ + Apply the rotation given by a quaternion to a 3D point. + Usual torch rules for broadcasting apply. + + Args: + quaternion: Tensor of quaternions, real part first, of shape (..., 4). + point: Tensor of 3D points of shape (..., 3). + + Returns: + Tensor of rotated points of shape (..., 3). + """ + if point.size(-1) != 3: + raise ValueError(f"Points are not in 3D, f{point.shape}.") + real_parts = point.new_zeros(point.shape[:-1] + (1,)) + point_as_quaternion = torch.cat((real_parts, point), -1) + out = quaternion_raw_multiply( + quaternion_raw_multiply(quaternion, point_as_quaternion), + quaternion_invert(quaternion), + ) + return out[..., 1:] + + +def axis_angle_to_matrix(axis_angle): + """ + Convert rotations given as axis/angle to rotation matrices. + + Args: + axis_angle: Rotations given as a vector in axis angle form, + as a tensor of shape (..., 3), where the magnitude is + the angle turned anticlockwise in radians around the + vector's direction. + + Returns: + Rotation matrices as tensor of shape (..., 3, 3). + """ + return quaternion_to_matrix(axis_angle_to_quaternion(axis_angle)) + + +def matrix_to_axis_angle(matrix): + """ + Convert rotations given as rotation matrices to axis/angle. + + Args: + matrix: Rotation matrices as tensor of shape (..., 3, 3). + + Returns: + Rotations given as a vector in axis angle form, as a tensor + of shape (..., 3), where the magnitude is the angle + turned anticlockwise in radians around the vector's + direction. + """ + return quaternion_to_axis_angle(matrix_to_quaternion(matrix)) + + +def axis_angle_to_quaternion(axis_angle): + """ + Convert rotations given as axis/angle to quaternions. + + Args: + axis_angle: Rotations given as a vector in axis angle form, + as a tensor of shape (..., 3), where the magnitude is + the angle turned anticlockwise in radians around the + vector's direction. + + Returns: + quaternions with real part first, as tensor of shape (..., 4). + """ + angles = torch.norm(axis_angle, p=2, dim=-1, keepdim=True) + half_angles = 0.5 * angles + eps = 1e-6 + small_angles = angles.abs() < eps + sin_half_angles_over_angles = torch.empty_like(angles) + sin_half_angles_over_angles[~small_angles] = ( + torch.sin(half_angles[~small_angles]) / angles[~small_angles] + ) + # for x small, sin(x/2) is about x/2 - (x/2)^3/6 + # so sin(x/2)/x is about 1/2 - (x*x)/48 + sin_half_angles_over_angles[small_angles] = ( + 0.5 - (angles[small_angles] * angles[small_angles]) / 48 + ) + quaternions = torch.cat( + [torch.cos(half_angles), axis_angle * sin_half_angles_over_angles], dim=-1 + ) + return quaternions + + +def quaternion_to_axis_angle(quaternions): + """ + Convert rotations given as quaternions to axis/angle. + + Args: + quaternions: quaternions with real part first, + as tensor of shape (..., 4). + + Returns: + Rotations given as a vector in axis angle form, as a tensor + of shape (..., 3), where the magnitude is the angle + turned anticlockwise in radians around the vector's + direction. 
+ """ + norms = torch.norm(quaternions[..., 1:], p=2, dim=-1, keepdim=True) + half_angles = torch.atan2(norms, quaternions[..., :1]) + angles = 2 * half_angles + eps = 1e-6 + small_angles = angles.abs() < eps + sin_half_angles_over_angles = torch.empty_like(angles) + sin_half_angles_over_angles[~small_angles] = ( + torch.sin(half_angles[~small_angles]) / angles[~small_angles] + ) + # for x small, sin(x/2) is about x/2 - (x/2)^3/6 + # so sin(x/2)/x is about 1/2 - (x*x)/48 + sin_half_angles_over_angles[small_angles] = ( + 0.5 - (angles[small_angles] * angles[small_angles]) / 48 + ) + return quaternions[..., 1:] / sin_half_angles_over_angles + + +def rotation_6d_to_matrix(d6: torch.Tensor) -> torch.Tensor: + """ + Converts 6D rotation representation by Zhou et al. [1] to rotation matrix + using Gram--Schmidt orthogonalisation per Section B of [1]. + Args: + d6: 6D rotation representation, of size (*, 6) + + Returns: + batch of rotation matrices of size (*, 3, 3) + + [1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H. + On the Continuity of Rotation Representations in Neural Networks. + IEEE Conference on Computer Vision and Pattern Recognition, 2019. + Retrieved from http://arxiv.org/abs/1812.07035 + """ + + a1, a2 = d6[..., :3], d6[..., 3:] + b1 = F.normalize(a1, dim=-1) + b2 = a2 - (b1 * a2).sum(-1, keepdim=True) * b1 + b2 = F.normalize(b2, dim=-1) + b3 = torch.cross(b1, b2, dim=-1) + return torch.stack((b1, b2, b3), dim=-2) + + +def matrix_to_rotation_6d(matrix: torch.Tensor) -> torch.Tensor: + """ + Converts rotation matrices to 6D rotation representation by Zhou et al. [1] + by dropping the last row. Note that 6D representation is not unique. + Args: + matrix: batch of rotation matrices of size (*, 3, 3) + + Returns: + 6D rotation representation, of size (*, 6) + + [1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H. + On the Continuity of Rotation Representations in Neural Networks. + IEEE Conference on Computer Vision and Pattern Recognition, 2019. + Retrieved from http://arxiv.org/abs/1812.07035 + """ + return matrix[..., :2, :].clone().reshape(*matrix.size()[:-2], 6) diff --git a/Evaluator_272/mld/utils/sample_utils.py b/Evaluator_272/mld/utils/sample_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..724b109af5ecd08e20cf0af681df317b390d4d44 --- /dev/null +++ b/Evaluator_272/mld/utils/sample_utils.py @@ -0,0 +1,18 @@ +import logging +from pathlib import Path +logger = logging.getLogger(__name__) + +def cfg_mean_nsamples_resolution(cfg): + if cfg.mean and cfg.number_of_samples > 1: + logger.error("All the samples will be the mean.. 
cfg.number_of_samples=1 will be forced.")
+        cfg.number_of_samples = 1
+
+    return cfg.number_of_samples == 1
+
+
+def get_path(sample_path: Path, is_amass: bool, gender: str, split: str, onesample: bool, mean: bool, fact: float):
+    extra_str = ("_mean" if mean else "") if onesample else "_multi"
+    fact_str = "" if fact == 1 else f"{fact}_"
+    gender_str = gender + "_" if is_amass else ""
+    path = sample_path / f"{fact_str}{gender_str}{split}{extra_str}"
+    return path
diff --git a/Evaluator_272/mld/utils/temos_utils.py b/Evaluator_272/mld/utils/temos_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..2fd47eb1437a39c79c645d0d1f6e558a4fe5109f
--- /dev/null
+++ b/Evaluator_272/mld/utils/temos_utils.py
@@ -0,0 +1,133 @@
+from typing import Dict, List
+
+import numpy as np
+import torch
+from torch import Tensor
+
+import mld.utils.geometry as geometry
+
+
+def lengths_to_mask(lengths: List[int],
+                    device: torch.device,
+                    max_len: int = None) -> Tensor:
+    lengths = torch.tensor(lengths, device=device)
+    max_len = max_len if max_len else max(lengths)
+    mask = torch.arange(max_len, device=device).expand(
+        len(lengths), max_len) < lengths.unsqueeze(1)
+    return mask
+
+
+def detach_to_numpy(tensor):
+    return tensor.detach().cpu().numpy()
+
+
+def remove_padding(tensors, lengths):
+    return [
+        tensor[:tensor_length]
+        for tensor, tensor_length in zip(tensors, lengths)
+    ]
+
+
+def nfeats_of(rottype):
+    if rottype in ["rotvec", "axisangle"]:
+        return 3
+    elif rottype in ["rotquat", "quaternion"]:
+        return 4
+    elif rottype in ["rot6d", "6drot", "rotation6d"]:
+        return 6
+    elif rottype in ["rotmat"]:
+        return 9
+    else:
+        raise TypeError("This rotation type doesn't have features.")
+
+
+def axis_angle_to(newtype, rotations):
+    if newtype in ["matrix"]:
+        rotations = geometry.axis_angle_to_matrix(rotations)
+        return rotations
+    elif newtype in ["rotmat"]:
+        rotations = geometry.axis_angle_to_matrix(rotations)
+        rotations = matrix_to("rotmat", rotations)
+        return rotations
+    elif newtype in ["rot6d", "6drot", "rotation6d"]:
+        rotations = geometry.axis_angle_to_matrix(rotations)
+        rotations = matrix_to("rot6d", rotations)
+        return rotations
+    elif newtype in ["rotquat", "quaternion"]:
+        rotations = geometry.axis_angle_to_quaternion(rotations)
+        return rotations
+    elif newtype in ["rotvec", "axisangle"]:
+        return rotations
+    else:
+        raise NotImplementedError
+
+
+def matrix_to(newtype, rotations):
+    if newtype in ["matrix"]:
+        return rotations
+    if newtype in ["rotmat"]:
+        rotations = rotations.reshape((*rotations.shape[:-2], 9))
+        return rotations
+    elif newtype in ["rot6d", "6drot", "rotation6d"]:
+        rotations = geometry.matrix_to_rotation_6d(rotations)
+        return rotations
+    elif newtype in ["rotquat", "quaternion"]:
+        rotations = geometry.matrix_to_quaternion(rotations)
+        return rotations
+    elif newtype in ["rotvec", "axisangle"]:
+        rotations = geometry.matrix_to_axis_angle(rotations)
+        return rotations
+    else:
+        raise NotImplementedError
+
+
+def to_matrix(oldtype, rotations):
+    if oldtype in ["matrix"]:
+        return rotations
+    if oldtype in ["rotmat"]:
+        # flat (..., 9) -> (..., 3, 3)
+        rotations = rotations.reshape((*rotations.shape[:-1], 3, 3))
+        return rotations
+    elif oldtype in ["rot6d", "6drot", "rotation6d"]:
+        rotations = geometry.rotation_6d_to_matrix(rotations)
+        return rotations
+    elif oldtype in ["rotquat", "quaternion"]:
+        rotations = geometry.quaternion_to_matrix(rotations)
+        return rotations
+    elif oldtype in ["rotvec", "axisangle"]:
+        rotations = geometry.axis_angle_to_matrix(rotations)
+        return rotations
+    else:
+        raise NotImplementedError
+
+
+# TODO: use a real subsampler..
+def subsample(num_frames, last_framerate, new_framerate):
+    step = int(last_framerate / new_framerate)
+    assert step >= 1
+    frames = np.arange(0, num_frames, step)
+    return frames
+
+
+# TODO: use a real upsampler..
+def upsample(motion, last_framerate, new_framerate):
+    step = int(new_framerate / last_framerate)
+    assert step >= 1
+
+    # Alpha blending => interpolation
+    alpha = np.linspace(0, 1, step + 1)
+    last = np.einsum("l,...->l...", 1 - alpha, motion[:-1])
+    new = np.einsum("l,...->l...", alpha, motion[1:])
+
+    chunks = (last + new)[:-1]
+    output = np.concatenate(chunks.swapaxes(1, 0))
+    # Don't forget the last one
+    output = np.concatenate((output, motion[[-1]]))
+    return output
+
+
+if __name__ == "__main__":
+    motion = np.arange(105)
+    submotion = motion[subsample(len(motion), 100.0, 12.5)]
+    newmotion = upsample(submotion, 12.5, 100)
+
+    print(newmotion)
diff --git a/Evaluator_272/mld/utils/tensors.py b/Evaluator_272/mld/utils/tensors.py
new file mode 100644
index 0000000000000000000000000000000000000000..166143893e5ad1494e3bdf8a9a12261f61e77335
--- /dev/null
+++ b/Evaluator_272/mld/utils/tensors.py
@@ -0,0 +1,74 @@
+import torch
+
+
+def lengths_to_mask(lengths):
+    max_len = max(lengths)
+    mask = torch.arange(max_len, device=lengths.device).expand(
+        len(lengths), max_len) < lengths.unsqueeze(1)
+    return mask
+
+
+def collate_tensors(batch):
+    dims = batch[0].dim()
+    max_size = [max([b.size(i) for b in batch]) for i in range(dims)]
+    size = (len(batch),) + tuple(max_size)
+    canvas = batch[0].new_zeros(size=size)
+    for i, b in enumerate(batch):
+        sub_tensor = canvas[i]
+        for d in range(dims):
+            sub_tensor = sub_tensor.narrow(d, 0, b.size(d))
+        sub_tensor.add_(b)
+    return canvas
+
+
+def collate(batch):
+    databatch = [b[0] for b in batch]
+    labelbatch = [b[1] for b in batch]
+    lenbatch = [len(b[0][0][0]) for b in batch]
+
+    databatchTensor = collate_tensors(databatch)
+    labelbatchTensor = torch.as_tensor(labelbatch)
+    lenbatchTensor = torch.as_tensor(lenbatch)
+
+    maskbatchTensor = lengths_to_mask(lenbatchTensor)
+    # x - [bs, njoints, nfeats, lengths]
+    #   - nfeats, the representation of a joint
+    # y - [bs]
+    # mask - [bs, lengths]
+    # lengths - [bs]
+    batch = {"x": databatchTensor, "y": labelbatchTensor,
+             "mask": maskbatchTensor, 'lengths': lenbatchTensor}
+    return batch
+
+
+# slow version with padding
+def collate_data3d_slow(batch):
+    batchTensor = {}
+    for key in batch[0].keys():
+        databatch = [b[key] for b in batch]
+        batchTensor[key] = collate_tensors(databatch)
+    batch = batchTensor
+    # theta - [bs, lengths, 85], theta shape (85,)
+    #       - (np.array([1., 0., 0.]), pose(72), shape(10)), axis=0)
+    # kp_2d - [bs, lengths, njoints, nfeats], nfeats (x,y,weight)
+    # kp_3d - [bs, lengths, njoints, nfeats], nfeats (x,y,z)
+    # w_smpl - [bs, lengths] zeros
+    # w_3d - [bs, lengths] zeros
+    return batch
+
+def collate_data3d(batch):
+    batchTensor = {}
+    for key in batch[0].keys():
+        databatch = [b[key] for b in batch]
+        if key == "paths":
+            batchTensor[key] = databatch
+        else:
+            batchTensor[key] = torch.stack(databatch, axis=0)
+    batch = batchTensor
+    # theta - [bs, lengths, 85], theta shape (85,)
+    #       - (np.array([1., 0., 0.]), pose(72), shape(10)), axis=0)
+    # kp_2d - [bs, lengths, njoints, nfeats], nfeats (x,y,weight)
+    # kp_3d - [bs, lengths, njoints, nfeats], nfeats (x,y,z)
+    # w_smpl - [bs, lengths] zeros
+    # w_3d - [bs, lengths] zeros
+    return batch
diff --git a/Evaluator_272/train.py
b/Evaluator_272/train.py new file mode 100644 index 0000000000000000000000000000000000000000..2c1936d781699860c5bd921909cceea787b93019 --- /dev/null +++ b/Evaluator_272/train.py @@ -0,0 +1,255 @@ +import os +from pprint import pformat + +import pytorch_lightning as pl +import torch +from omegaconf import OmegaConf +from pytorch_lightning import loggers as pl_loggers +from pytorch_lightning.callbacks import ModelCheckpoint +# from pytorch_lightning.strategies.ddp import DDPStrategy + +from mld.callback import ProgressLogger +from mld.config import parse_args +from mld.data.get_data import get_datasets +from mld.models.get_model import get_model +from mld.utils.logger import create_logger + + +def main(): + cfg = parse_args() # parse config file + + # create logger + logger = create_logger(cfg, phase="train") + + # resume + if cfg.TRAIN.RESUME: + resume = cfg.TRAIN.RESUME + backcfg = cfg.TRAIN.copy() + if os.path.exists(resume): + file_list = sorted(os.listdir(resume), reverse=True) + for item in file_list: + if item.endswith(".yaml"): + cfg = OmegaConf.load(os.path.join(resume, item)) + cfg.TRAIN = backcfg + break + checkpoints = sorted(os.listdir(os.path.join( + resume, "checkpoints")), + key=lambda x: int(x[6:-5]), + reverse=True) + for checkpoint in checkpoints: + if "epoch=" in checkpoint: + cfg.TRAIN.PRETRAINED = os.path.join( + resume, "checkpoints", checkpoint) + break + if os.path.exists(os.path.join(resume, "wandb")): + wandb_list = sorted(os.listdir(os.path.join(resume, "wandb")), + reverse=True) + for item in wandb_list: + if "run-" in item: + cfg.LOGGER.WANDB.RESUME_ID = item.split("-")[-1] + + else: + raise ValueError("Resume path is not right.") + # set seed + pl.seed_everything(cfg.SEED_VALUE) + + # gpu setting + if cfg.ACCELERATOR == "gpu": + # os.environ["PYTHONWARNINGS"] = "ignore" + os.environ["TOKENIZERS_PARALLELISM"] = "false" + # os.environ['CUDA_VISIBLE_DEVICES'] = ",".join(str(x) for x in cfg.DEVICE) + + # tensorboard logger and wandb logger + loggers = [] + if cfg.LOGGER.WANDB.PROJECT: + wandb_logger = pl_loggers.WandbLogger( + project=cfg.LOGGER.WANDB.PROJECT, + offline=cfg.LOGGER.WANDB.OFFLINE, + id=cfg.LOGGER.WANDB.RESUME_ID, + save_dir=cfg.FOLDER_EXP, + version="", + name=cfg.NAME, + anonymous=False, + log_model=False, + ) + loggers.append(wandb_logger) + if cfg.LOGGER.TENSORBOARD: + tb_logger = pl_loggers.TensorBoardLogger(save_dir=cfg.FOLDER_EXP, + sub_dir="tensorboard", + version="", + name="") + loggers.append(tb_logger) + logger.info(OmegaConf.to_yaml(cfg)) + + # create dataset + datasets = get_datasets(cfg, logger=logger) + logger.info("datasets module {} initialized".format("".join( + cfg.TRAIN.DATASETS))) + + # create model + model = get_model(cfg, datasets[0]) + logger.info("model {} loaded".format(cfg.model.model_type)) + + + + if cfg.TRAIN.STAGE in ['gpt']: + logger.info("Loading pretrain vae from {}".format( + cfg.TRAIN.PRETRAINED_VAE)) + state_dict = torch.load(cfg.TRAIN.PRETRAINED_VAE, + map_location="cpu")["state_dict"] + # extract encoder/decoder + from collections import OrderedDict + vae_dict = OrderedDict() + for k, v in state_dict.items(): + if k.split(".")[0] == "vae": + name = k.replace("vae.vqvae", "vqvae") + vae_dict[name] = v + model.vae.load_state_dict(vae_dict, strict=True) + else: + if cfg.TRAIN.PRETRAINED_VAE: + logger.info("Loading pretrain vae from {}".format( + cfg.TRAIN.PRETRAINED_VAE)) + state_dict = torch.load(cfg.TRAIN.PRETRAINED_VAE, + map_location="cpu")["state_dict"] + # extract encoder/decoder + from collections import 
OrderedDict + vae_dict = OrderedDict() + for k, v in state_dict.items(): + if k.split(".")[0] == "vae": + name = k.replace("vae.", "") + vae_dict[name] = v + model.vae.load_state_dict(vae_dict, strict=True) + + + # optimizer + metric_monitor = { + "Train_jf": "recons/text2jfeats/train", + "Val_jf": "recons/text2jfeats/val", + "Train_rf": "recons/text2rfeats/train", + "Val_rf": "recons/text2rfeats/val", + "APE root": "Metrics/APE_root", + "APE mean pose": "Metrics/APE_mean_pose", + "AVE root": "Metrics/AVE_root", + "AVE mean pose": "Metrics/AVE_mean_pose", + "R_TOP_1": "Metrics/R_precision_top_1", + "R_TOP_2": "Metrics/R_precision_top_2", + "R_TOP_3": "Metrics/R_precision_top_3", + "gt_R_TOP_1": "Metrics/gt_R_precision_top_1", + "gt_R_TOP_2": "Metrics/gt_R_precision_top_2", + "gt_R_TOP_3": "Metrics/gt_R_precision_top_3", + "FID": "Metrics/FID", + "gt_FID": "Metrics/gt_FID", + "Diversity": "Metrics/Diversity", + "gt_Diversity": "Metrics/gt_Diversity", + "MM dist": "Metrics/Matching_score", + "Accuracy": "Metrics/accuracy", + "gt_Accuracy": "Metrics/gt_accuracy", + } + + # callbacks + callbacks = [ + pl.callbacks.RichProgressBar(), + ProgressLogger(metric_monitor=metric_monitor), + # ModelCheckpoint(dirpath=os.path.join(cfg.FOLDER_EXP,'checkpoints'),filename='latest-{epoch}',every_n_epochs=1,save_top_k=1,save_last=True,save_on_train_epoch_end=True), + ModelCheckpoint( + dirpath=os.path.join(cfg.FOLDER_EXP, "checkpoints"), + filename="{epoch}", + monitor="step", + mode="max", + every_n_epochs=cfg.LOGGER.SAVE_CHECKPOINT_EPOCH, + save_top_k=-1, + save_last=False, + save_on_train_epoch_end=True, + ), + ] + logger.info("Callbacks initialized") + + if len(cfg.DEVICE) > 1: + # ddp_strategy = DDPStrategy(find_unused_parameters=False) + ddp_strategy = "ddp" + else: + ddp_strategy = None + + # trainer + trainer = pl.Trainer( + benchmark=False, + max_epochs=cfg.TRAIN.END_EPOCH, + accelerator=cfg.ACCELERATOR, + devices=cfg.DEVICE, + # gpus=2, + strategy=ddp_strategy, + # move_metrics_to_cpu=True, + default_root_dir=cfg.FOLDER_EXP, + log_every_n_steps=cfg.LOGGER.VAL_EVERY_STEPS, + deterministic=False, + detect_anomaly=False, + enable_progress_bar=True, + logger=loggers, + callbacks=callbacks, + check_val_every_n_epoch=cfg.LOGGER.VAL_EVERY_STEPS, + ) + logger.info("Trainer initialized") + + if cfg.TRAIN.STAGE == 'temos': + vae_type = 'temos' + else: + vae_type = cfg.model.motion_vae.target.split(".")[-1].lower().replace( + "vae", "") + + + if cfg.TRAIN.PRETRAINED_MLD: + logger.info("Loading pretrain mld from {}".format( + cfg.TRAIN.PRETRAINED_MLD)) + + state_dict = torch.load(cfg.TRAIN.PRETRAINED_MLD, + map_location="cpu")["state_dict"] + + + from collections import OrderedDict + vae_dict = OrderedDict() + for k, v in state_dict.items(): + if k.split(".")[0] == "denoiser": + name = k.replace("denoiser.", "") + vae_dict[name] = v + model.denoiser.load_state_dict(vae_dict, strict=True) + + + + if cfg.TRAIN.PRETRAINED: + + logger.info("Loading pretrain mode from {}".format( + cfg.TRAIN.PRETRAINED)) + logger.info("Attention! 
VAE will be recovered") + state_dict = torch.load(cfg.TRAIN.PRETRAINED, + map_location="cpu")["state_dict"] + # remove mismatched and unused params + from collections import OrderedDict + + new_state_dict = OrderedDict() + for k, v in state_dict.items(): + if k not in ["denoiser.sequence_pos_encoding.pe"]: + new_state_dict[k] = v + model.load_state_dict(new_state_dict, strict=False) + + # fitting + + if cfg.TRAIN.RESUME: + trainer.validate(model, datamodule=datasets[0], ckpt_path=cfg.TRAIN.PRETRAINED) + trainer.fit(model, + datamodule=datasets[0], + ckpt_path=cfg.TRAIN.PRETRAINED) + else: + trainer.fit(model, datamodule=datasets[0]) + + # checkpoint + checkpoint_folder = trainer.checkpoint_callback.dirpath + logger.info(f"The checkpoints are stored in {checkpoint_folder}") + logger.info( + f"The outputs of this experiment are stored in {cfg.FOLDER_EXP}") + + # end + logger.info("Training ends!") + + +if __name__ == "__main__": + main() diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..30a09b77b5c10706a0b6fd8bf8140e8fd49603cd --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2025 ZJU3DV + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
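Aside: the checkpoint loading in `Evaluator_272/train.py` above repeatedly uses one pattern — filter a Lightning `state_dict` by key prefix (`vae.`, `denoiser.`) and load the stripped keys into the matching sub-module. A minimal sketch of that pattern follows; the function and checkpoint names here are hypothetical placeholders, not files shipped with this repo:

```python
from collections import OrderedDict

import torch


def load_prefixed_submodule(submodule, ckpt_path, prefix):
    """Keep only the checkpoint entries whose keys start with `prefix`,
    strip the prefix, and load them into the corresponding sub-module."""
    state_dict = torch.load(ckpt_path, map_location="cpu")["state_dict"]
    sub_dict = OrderedDict(
        (k[len(prefix):], v) for k, v in state_dict.items() if k.startswith(prefix)
    )
    submodule.load_state_dict(sub_dict, strict=True)


# e.g. (hypothetical path): load_prefixed_submodule(model.vae, "pretrained_vae.ckpt", "vae.")
```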
diff --git a/README.md b/README.md new file mode 100644 index 0000000000000000000000000000000000000000..8318c86b357b6ddbf674ad238b8745d2781cab4b --- /dev/null +++ b/README.md @@ -0,0 +1 @@ +Test \ No newline at end of file diff --git a/TRAIN_causal_TAE.sh b/TRAIN_causal_TAE.sh new file mode 100644 index 0000000000000000000000000000000000000000..ce44d542d69b8f031662fae356beb9d702927a38 --- /dev/null +++ b/TRAIN_causal_TAE.sh @@ -0,0 +1,22 @@ +NUM_GPUS=${1:-1} # default: 1 GPU +dataset_name=${2:-t2m_272} # default: t2m_272, options: t2m_272, t2m_babel_272 + +BATCH_SIZE=$((128 / NUM_GPUS)) + +echo "Using $NUM_GPUS GPUs, each with a batch size of $BATCH_SIZE" + +accelerate launch --num_processes $NUM_GPUS train_causal_TAE.py \ +--batch-size $BATCH_SIZE \ +--lr 0.00005 \ +--total-iter 2000000 \ +--lr-scheduler 1900000 \ +--down-t 2 \ +--depth 3 \ +--dilation-growth-rate 3 \ +--out-dir Experiments \ +--dataname $dataset_name \ +--exp-name causal_TAE_${dataset_name} \ +--root_loss 7.0 \ +--latent_dim 16 \ +--hidden_size 1024 \ +--num_gpus $NUM_GPUS \ No newline at end of file diff --git a/TRAIN_evaluator_272.sh b/TRAIN_evaluator_272.sh new file mode 100644 index 0000000000000000000000000000000000000000..f032f627e010952f441538a20b07cb6594d4c0fd --- /dev/null +++ b/TRAIN_evaluator_272.sh @@ -0,0 +1,6 @@ +export HF_ENDPOINT=https://hf-mirror.com +cd Evaluator_272 +huggingface-cli download --resume-download distilbert/distilbert-base-uncased --local-dir ./deps/distilbert-base-uncased +ln -s ../humanml3d_272 ./datasets/humanml3d_272 +python -m train --cfg configs/configs_evaluator_272/H3D-TMR.yaml --cfg_assets configs/assets.yaml --batch_size 256 --nodebug +cd .. \ No newline at end of file diff --git a/TRAIN_motionstreamer.sh b/TRAIN_motionstreamer.sh new file mode 100644 index 0000000000000000000000000000000000000000..a497703895e36d94dc71d921731343837dda8d12 --- /dev/null +++ b/TRAIN_motionstreamer.sh @@ -0,0 +1,15 @@ +NUM_GPUS=${1:-1} # default: 1 GPU + +BATCH_SIZE=$((256 / NUM_GPUS)) + +echo "Using $NUM_GPUS GPUs, each with a batch size of $BATCH_SIZE" + +accelerate launch --num_processes $NUM_GPUS train_motionstreamer.py \ +--batch-size $BATCH_SIZE \ +--lr 0.0001 \ +--total-iter 100000 \ +--out-dir Experiments \ +--exp-name motionstreamer_model \ +--dataname t2m_babel_272 \ +--latent_dir babel_272_stream/t2m_babel_latents \ +--num_gpus $NUM_GPUS \ No newline at end of file diff --git a/TRAIN_t2m.sh b/TRAIN_t2m.sh new file mode 100644 index 0000000000000000000000000000000000000000..769a1d428abe3005318ad144ba37b1e53db8e537 --- /dev/null +++ b/TRAIN_t2m.sh @@ -0,0 +1,15 @@ +NUM_GPUS=${1:-1} # default: 1 GPU + +BATCH_SIZE=$((256 / NUM_GPUS)) + +echo "Using $NUM_GPUS GPUs, each with a batch size of $BATCH_SIZE" + +accelerate launch --num_processes $NUM_GPUS train_t2m.py \ +--batch-size $BATCH_SIZE \ +--lr 0.0001 \ +--total-iter 100000 \ +--out-dir Experiments \ +--exp-name t2m_model \ +--dataname t2m_272 \ +--latent_dir humanml3d_272/t2m_latents \ +--num_gpus $NUM_GPUS \ No newline at end of file diff --git a/assets/teaser.jpg b/assets/teaser.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b49953140d48925b6066f99a297d3546a986b7b2 --- /dev/null +++ b/assets/teaser.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7958c8564ae20e48165890a08d21d1b63d2a6ce94fed017fb7b5504286f0b5da +size 751242 diff --git a/babel_272_stream/.gitattributes b/babel_272_stream/.gitattributes new file mode 100644 index 
0000000000000000000000000000000000000000..1ef325f1b111266a6b26e0196871bd78baa8c2f3 --- /dev/null +++ b/babel_272_stream/.gitattributes @@ -0,0 +1,59 @@ +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.lz4 filter=lfs diff=lfs merge=lfs -text +*.mds filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +*.safetensors filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text +# Audio files - uncompressed +*.pcm filter=lfs diff=lfs merge=lfs -text +*.sam filter=lfs diff=lfs merge=lfs -text +*.raw filter=lfs diff=lfs merge=lfs -text +# Audio files - compressed +*.aac filter=lfs diff=lfs merge=lfs -text +*.flac filter=lfs diff=lfs merge=lfs -text +*.mp3 filter=lfs diff=lfs merge=lfs -text +*.ogg filter=lfs diff=lfs merge=lfs -text +*.wav filter=lfs diff=lfs merge=lfs -text +# Image files - uncompressed +*.bmp filter=lfs diff=lfs merge=lfs -text +*.gif filter=lfs diff=lfs merge=lfs -text +*.png filter=lfs diff=lfs merge=lfs -text +*.tiff filter=lfs diff=lfs merge=lfs -text +# Image files - compressed +*.jpg filter=lfs diff=lfs merge=lfs -text +*.jpeg filter=lfs diff=lfs merge=lfs -text +*.webp filter=lfs diff=lfs merge=lfs -text +# Video files - compressed +*.mp4 filter=lfs diff=lfs merge=lfs -text +*.webm filter=lfs diff=lfs merge=lfs -text diff --git a/babel_272_stream/README.md b/babel_272_stream/README.md new file mode 100644 index 0000000000000000000000000000000000000000..088ccdb349bec6ad7591c66395857ce934c61307 --- /dev/null +++ b/babel_272_stream/README.md @@ -0,0 +1,62 @@ +--- +license: apache-2.0 +--- +## 🚀 Dataset Usage +To facilitate researchers, we provide the processed streaming 272-dim Motion Representation of the [BABEL](https://babel.is.tue.mpg.de/) dataset in this Hugging Face repo. + +NOTE: We process the original BABEL dataset to support training of streaming motion generation. +For example, if a motion sequence A is annotated as (A1, A2, A3, A4) in the BABEL dataset, each subsequence has a text description: (A1_t, A2_t, A3_t, A4_t).
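Each streaming sample constructed below pairs two consecutive subsequences and packs both descriptions, plus the frame count of the first one, into a single text label. As a concrete illustration, here is a hedged parsing sketch (the helper name `parse_stream_text` is invented here, not an API of this repo):

```python
# Parse one BABEL-stream label of the form "<text_a>*<text_b>#<frames_a>",
# where '*' and '#' are the separators described below and <frames_a> is
# the length (in frames, at 30 FPS) of the first subsequence.
def parse_stream_text(label: str) -> tuple[str, str, int]:
    texts, frames_a = label.rsplit("#", 1)
    text_a, text_b = texts.split("*", 1)
    return text_a, text_b, int(frames_a)

# Example (made-up descriptions):
print(parse_stream_text("a person walks forward*a person jumps#120"))
# -> ('a person walks forward', 'a person jumps', 120)
```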
+ +Then, our BABEL-stream is constructed as: + +seq1: (A1, A2) --- seq1_text: (A1_t*A2_t#A1_length) + +seq2: (A2, A3) --- seq2_text: (A2_t*A3_t#A2_length) + +seq3: (A3, A4) --- seq3_text: (A3_t*A4_t#A3_length) + +Here, * and # are separator symbols, and A1_length denotes the number of frames in subsequence A1. + +Motions are resampled to 30 FPS. + +The dataset is organized as: +``` +./ + ├── train_stream + ├── seq1.npy + ... + ├── train_stream_text + ├── seq1.txt + ... + ├── val_stream + ├── seq1.npy + ... + ├── val_stream_text + ├── seq1.txt + ... +``` + +❗️❗️❗️ The processed data is solely for academic purposes. Make sure you read through the [BABEL License](https://babel.is.tue.mpg.de/license.html). + +## 📖 Paper & Project Page & Code +* [arXiv Paper](https://arxiv.org/abs/2503.15451) +* [Project Page](https://zju3dv.github.io/MotionStreamer/) +* [Code](https://github.com/zju3dv/MotionStreamer) + +## 🏃 Processing script +For more details on how to obtain the 272-dim motion representation, as well as other useful tools (e.g., visualization and conversion to BVH format), please refer to our [GitHub repo](https://github.com/Li-xingXiao/272-dim-Motion-Representation). + +## 🌹 Acknowledgement +This repository builds upon the following awesome datasets and projects: +- [BABEL](https://babel.is.tue.mpg.de/) + +## 🤝🏼 Citation +If our project is helpful for your research, please consider citing: +``` +@article{xiao2025motionstreamer, + title={MotionStreamer: Streaming Motion Generation via Diffusion-based Autoregressive Model in Causal Latent Space}, + author={Xiao, Lixing and Lu, Shunlin and Pi, Huaijin and Fan, Ke and Pan, Liang and Zhou, Yueer and Feng, Ziyong and Zhou, Xiaowei and Peng, Sida and Wang, Jingbo}, + journal={arXiv preprint arXiv:2503.15451}, + year={2025} + } +``` \ No newline at end of file diff --git a/babel_272_stream/train_stream.zip b/babel_272_stream/train_stream.zip new file mode 100644 index 0000000000000000000000000000000000000000..65fa00ac0c809563d3a8b9e22314f7f84742bb7f --- /dev/null +++ b/babel_272_stream/train_stream.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:35db924d754e321f673a72c22b80d5d725f55d74151fc34351f554ef6bf33a2e +size 6901914721 diff --git a/babel_272_stream/train_stream_text.zip b/babel_272_stream/train_stream_text.zip new file mode 100644 index 0000000000000000000000000000000000000000..a756a4af4d61979632e698beb159f9b447a8bacb --- /dev/null +++ b/babel_272_stream/train_stream_text.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d46561fcaf62738b1d08cf54a851ffecb3fb7a154f9663b199dfa83f0d677046 +size 4746908 diff --git a/babel_272_stream/val_stream.zip b/babel_272_stream/val_stream.zip new file mode 100644 index 0000000000000000000000000000000000000000..025ec509ebbe84fe6ea611e1f0531a2e10b2a124 --- /dev/null +++ b/babel_272_stream/val_stream.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0564c64ce642330222b3ed83d031f5f3765c6979a82f17a2259e07d80d0ff78a +size 2580199524 diff --git a/babel_272_stream/val_stream_text.zip b/babel_272_stream/val_stream_text.zip new file mode 100644 index 0000000000000000000000000000000000000000..b74aaa6f241c4a0c8b59f92a11670b1e454ce0ce --- /dev/null +++ b/babel_272_stream/val_stream_text.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba646f2836f03a7fa1a5470aa8c098d1b0e446872d5bf53b8b42283e5c1f368b +size 1685986 diff --git a/body_models/human_model_files/mano/MANO_LEFT.pkl
b/body_models/human_model_files/mano/MANO_LEFT.pkl new file mode 100644 index 0000000000000000000000000000000000000000..32cdc533e2c01ed4995db2dc1302520d7d374c5a --- /dev/null +++ b/body_models/human_model_files/mano/MANO_LEFT.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c4022f7083f2ca7c78b2b3d595abbab52debd32b09d372b16923a801f0ea6a30 +size 3821391 diff --git a/body_models/human_model_files/mano/MANO_RIGHT.pkl b/body_models/human_model_files/mano/MANO_RIGHT.pkl new file mode 100644 index 0000000000000000000000000000000000000000..8e7ac7faf64ad51096ec1da626ea13757ed7f665 --- /dev/null +++ b/body_models/human_model_files/mano/MANO_RIGHT.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:45d60aa3b27ef9107a7afd4e00808f307fd91111e1cfa35afd5c4a62de264767 +size 3821356 diff --git a/body_models/human_model_files/smpl/J_regressor_extra.npy b/body_models/human_model_files/smpl/J_regressor_extra.npy new file mode 100644 index 0000000000000000000000000000000000000000..d6cf8c0f6747d3c623a0d300c5176843ae99031d --- /dev/null +++ b/body_models/human_model_files/smpl/J_regressor_extra.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc968ea4f9855571e82f90203280836b01f13ee42a8e1b89d8d580b801242a89 +size 496160 diff --git a/body_models/human_model_files/smpl/SMPL_FEMALE.pkl b/body_models/human_model_files/smpl/SMPL_FEMALE.pkl new file mode 100644 index 0000000000000000000000000000000000000000..92a201f4839bd95c1c1986437c7c6a02d7d1ae99 --- /dev/null +++ b/body_models/human_model_files/smpl/SMPL_FEMALE.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a583c1b98e4afc19042641f1bae5cd8a1f712a6724886291a7627ec07acd408d +size 39056454 diff --git a/body_models/human_model_files/smpl/SMPL_MALE.pkl b/body_models/human_model_files/smpl/SMPL_MALE.pkl new file mode 100644 index 0000000000000000000000000000000000000000..43dfecc57d9b7aa99cd2398df818ba252be7f605 --- /dev/null +++ b/body_models/human_model_files/smpl/SMPL_MALE.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e8c0bbbbc635dcb166ed29c303fb4bef16ea5f623e5a89263495a9e403575bd +size 39056404 diff --git a/body_models/human_model_files/smpl/SMPL_NEUTRAL.pkl b/body_models/human_model_files/smpl/SMPL_NEUTRAL.pkl new file mode 100644 index 0000000000000000000000000000000000000000..26574fd104c4b69467f3c7c3516a8508d8a1a36e --- /dev/null +++ b/body_models/human_model_files/smpl/SMPL_NEUTRAL.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98e65c74ad9b998783132f00880d1025a8d64b158e040e6ef13a557e5098bc42 +size 39001280 diff --git a/body_models/human_model_files/smpl/VPOSER_CKPT/TR00_004_00_WO_accad.ini b/body_models/human_model_files/smpl/VPOSER_CKPT/TR00_004_00_WO_accad.ini new file mode 100644 index 0000000000000000000000000000000000000000..e6694eefdcbf0588daf474c10d9f93487d39938d --- /dev/null +++ b/body_models/human_model_files/smpl/VPOSER_CKPT/TR00_004_00_WO_accad.ini @@ -0,0 +1,29 @@ +[All] +adam_beta1 : 0.9 +base_lr : 0.005 +batch_size : 512 +best_model_fname : None +cuda_id : 0 +data_shape : [1, 21, 3] +dataset_dir : None +display_model_gender : male +expr_code : 004_00_WO_accad +fp_precision : 32 +ip_avoid : False +kl_coef : 0.005 +latentD : 32 +log_every_epoch : 2 +model_type : smpl +n_workers : 10 +num_bodies_to_display : 10 +num_epochs : 100 +num_neurons : 512 +reg_coef : 0.0001 +remove_Zrot : True +seed : 4815 +sm_coef : 0.01 +test_only : False +try_num : 0 +use_cont_repr : True +verbosity : 0 +work_dir 
: None diff --git a/body_models/human_model_files/smpl/VPOSER_CKPT/snapshots/._TR00_E096.pt b/body_models/human_model_files/smpl/VPOSER_CKPT/snapshots/._TR00_E096.pt new file mode 100644 index 0000000000000000000000000000000000000000..3bda19c8bfbf67322c9523195ce6658df3d6fe43 --- /dev/null +++ b/body_models/human_model_files/smpl/VPOSER_CKPT/snapshots/._TR00_E096.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e2615cd1d2e78cdfac7169c6182a7352d02992336dad7329d3d97f6947fb515 +size 4096 diff --git a/body_models/human_model_files/smpl/VPOSER_CKPT/snapshots/TR00_E096.pt b/body_models/human_model_files/smpl/VPOSER_CKPT/snapshots/TR00_E096.pt new file mode 100644 index 0000000000000000000000000000000000000000..ec00aac415784a56246b6879249883c7084f9559 --- /dev/null +++ b/body_models/human_model_files/smpl/VPOSER_CKPT/snapshots/TR00_E096.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e4ad40f922606989939d3fae6eadf82d1a8e98112dffb6e39d89d6471270d5c +size 2702962 diff --git a/body_models/human_model_files/smpl/VPOSER_CKPT/vposer_smpl.py b/body_models/human_model_files/smpl/VPOSER_CKPT/vposer_smpl.py new file mode 100644 index 0000000000000000000000000000000000000000..9b07d6286690bb67d0e4c0c0b46fcfcb84fb20e1 --- /dev/null +++ b/body_models/human_model_files/smpl/VPOSER_CKPT/vposer_smpl.py @@ -0,0 +1,164 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2019 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG), +# acting on behalf of its Max Planck Institute for Intelligent Systems and the +# Max Planck Institute for Biological Cybernetics. All rights reserved. +# +# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights +# on this computer program. You can only use this computer program if you have closed a license agreement +# with MPG or you get the right to use the computer program from someone who is authorized to grant you that right. +# Any use of the computer program without a valid license is prohibited and liable to prosecution. 
+# Contact: ps-license@tuebingen.mpg.de +# +# +# If you use this code in a research publication please consider citing the following: +# +# Expressive Body Capture: 3D Hands, Face, and Body from a Single Image +# AMASS: Archive of Motion Capture as Surface Shapes +# +# +# Code Developed by: +# Nima Ghorbani +# Vassilis Choutas for ContinousRotReprDecoder +# +# 2018.01.02 + +''' +A human body pose prior built with Auto-Encoding Variational Bayes +''' + +__all__ = ['VPoser'] + +import os, sys, shutil + +import torch + +from torch import nn +from torch.nn import functional as F + +import numpy as np + +import torchgeometry as tgm + +class ContinousRotReprDecoder(nn.Module): + def __init__(self): + super(ContinousRotReprDecoder, self).__init__() + + def forward(self, module_input): + reshaped_input = module_input.view(-1, 3, 2) + + b1 = F.normalize(reshaped_input[:, :, 0], dim=1) + + dot_prod = torch.sum(b1 * reshaped_input[:, :, 1], dim=1, keepdim=True) + b2 = F.normalize(reshaped_input[:, :, 1] - dot_prod * b1, dim=-1) + b3 = torch.cross(b1, b2, dim=1) + + return torch.stack([b1, b2, b3], dim=-1) + + +class VPoser(nn.Module): + def __init__(self, num_neurons, latentD, data_shape, use_cont_repr=True): + super(VPoser, self).__init__() + + self.latentD = latentD + self.use_cont_repr = use_cont_repr + + n_features = np.prod(data_shape) + self.num_joints = data_shape[1] + + self.bodyprior_enc_bn1 = nn.BatchNorm1d(n_features) + self.bodyprior_enc_fc1 = nn.Linear(n_features, num_neurons) + self.bodyprior_enc_bn2 = nn.BatchNorm1d(num_neurons) + self.bodyprior_enc_fc2 = nn.Linear(num_neurons, num_neurons) + self.bodyprior_enc_mu = nn.Linear(num_neurons, latentD) + self.bodyprior_enc_logvar = nn.Linear(num_neurons, latentD) + self.dropout = nn.Dropout(p=.1, inplace=False) + + self.bodyprior_dec_fc1 = nn.Linear(latentD, num_neurons) + self.bodyprior_dec_fc2 = nn.Linear(num_neurons, num_neurons) + + if self.use_cont_repr: + self.rot_decoder = ContinousRotReprDecoder() + + self.bodyprior_dec_out = nn.Linear(num_neurons, self.num_joints* 6) + + def encode(self, Pin): + ''' + + :param Pin: Nx(numjoints*3) + :param rep_type: 'matrot'/'aa' for matrix rotations or axis-angle + :return: + ''' + Xout = Pin.view(Pin.size(0), -1) # flatten input + Xout = self.bodyprior_enc_bn1(Xout) + + Xout = F.leaky_relu(self.bodyprior_enc_fc1(Xout), negative_slope=.2) + Xout = self.bodyprior_enc_bn2(Xout) + Xout = self.dropout(Xout) + Xout = F.leaky_relu(self.bodyprior_enc_fc2(Xout), negative_slope=.2) + return torch.distributions.normal.Normal(self.bodyprior_enc_mu(Xout), F.softplus(self.bodyprior_enc_logvar(Xout))) + + def decode(self, Zin, output_type='matrot'): + assert output_type in ['matrot', 'aa'] + + Xout = F.leaky_relu(self.bodyprior_dec_fc1(Zin), negative_slope=.2) + Xout = self.dropout(Xout) + Xout = F.leaky_relu(self.bodyprior_dec_fc2(Xout), negative_slope=.2) + Xout = self.bodyprior_dec_out(Xout) + if self.use_cont_repr: + Xout = self.rot_decoder(Xout) + else: + Xout = torch.tanh(Xout) + + Xout = Xout.view([-1, 1, self.num_joints, 9]) + if output_type == 'aa': return VPoser.matrot2aa(Xout) + return Xout + + def forward(self, Pin, input_type='matrot', output_type='matrot'): + ''' + + :param Pin: aa: Nx1xnum_jointsx3 / matrot: Nx1xnum_jointsx9 + :param input_type: matrot / aa for matrix rotations or axis angles + :param output_type: matrot / aa + :return: + ''' + assert output_type in ['matrot', 'aa'] + # if input_type == 'aa': Pin = VPoser.aa2matrot(Pin) + q_z = self.encode(Pin) + q_z_sample = q_z.rsample() + Prec 
= self.decode(q_z_sample) + if output_type == 'aa': Prec = VPoser.matrot2aa(Prec) + + #return Prec, q_z.mean, q_z.sigma + return {'pose':Prec, 'mean':q_z.mean, 'std':q_z.scale} + + def sample_poses(self, num_poses, output_type='aa', seed=None): + np.random.seed(seed) + dtype = self.bodyprior_dec_fc1.weight.dtype + device = self.bodyprior_dec_fc1.weight.device + self.eval() + with torch.no_grad(): + Zgen = torch.tensor(np.random.normal(0., 1., size=(num_poses, self.latentD)), dtype=dtype).to(device) + return self.decode(Zgen, output_type=output_type) + + @staticmethod + def matrot2aa(pose_matrot): + ''' + :param pose_matrot: Nx1xnum_jointsx9 + :return: Nx1xnum_jointsx3 + ''' + batch_size = pose_matrot.size(0) + homogen_matrot = F.pad(pose_matrot.view(-1, 3, 3), [0,1]) + pose = tgm.rotation_matrix_to_angle_axis(homogen_matrot).view(batch_size, 1, -1, 3).contiguous() + return pose + + @staticmethod + def aa2matrot(pose): + ''' + :param Nx1xnum_jointsx3 + :return: pose_matrot: Nx1xnum_jointsx9 + ''' + batch_size = pose.size(0) + pose_body_matrot = tgm.angle_axis_to_rotation_matrix(pose.reshape(-1, 3))[:, :3, :3].contiguous().view(batch_size, 1, -1, 9) + return pose_body_matrot + diff --git a/body_models/human_model_files/smplx/MANO_SMPLX_vertex_ids.pkl b/body_models/human_model_files/smplx/MANO_SMPLX_vertex_ids.pkl new file mode 100644 index 0000000000000000000000000000000000000000..dabec1377a0da4c511a519a00f51f1a3a23f33af --- /dev/null +++ b/body_models/human_model_files/smplx/MANO_SMPLX_vertex_ids.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5abe70b6574de25470475091e8008314a5b90127eb48c3e63bfa0adf8c04dcf +size 13535 diff --git a/body_models/human_model_files/smplx/SMPL-X__FLAME_vertex_ids.npy b/body_models/human_model_files/smplx/SMPL-X__FLAME_vertex_ids.npy new file mode 100644 index 0000000000000000000000000000000000000000..c940d3aa6cb4cbbcc348fd518b15d8777dc350fd --- /dev/null +++ b/body_models/human_model_files/smplx/SMPL-X__FLAME_vertex_ids.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e70cdc3659aae699b9732e8dd4af49106310c69b90dc83d9f73e96dbf871e49 +size 40312 diff --git a/body_models/human_model_files/smplx/SMPLX_FEMALE.npz b/body_models/human_model_files/smplx/SMPLX_FEMALE.npz new file mode 100644 index 0000000000000000000000000000000000000000..f97be40778b6ac922a0706d42c099fd8f8d942fa --- /dev/null +++ b/body_models/human_model_files/smplx/SMPLX_FEMALE.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:05e37bd22dff93362c92cea9c791c62a2d4d7e8d44b234f3e41be0020fa1c256 +size 108532279 diff --git a/body_models/human_model_files/smplx/SMPLX_FEMALE.pkl b/body_models/human_model_files/smplx/SMPLX_FEMALE.pkl new file mode 100644 index 0000000000000000000000000000000000000000..44a1a7645392010fbee7b206ab4400241fc7b488 --- /dev/null +++ b/body_models/human_model_files/smplx/SMPLX_FEMALE.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b870ce1fd05b46dd81e2de6269b2955667c931c8594999eb22eeb489b00e2c1f +size 146809856 diff --git a/body_models/human_model_files/smplx/SMPLX_MALE.npz b/body_models/human_model_files/smplx/SMPLX_MALE.npz new file mode 100644 index 0000000000000000000000000000000000000000..436ea68e6bdd388ca7d536a7b3d79c714d190e89 --- /dev/null +++ b/body_models/human_model_files/smplx/SMPLX_MALE.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:79360d466228bec1b9f9d922ea48df718a0a09bccddace18cfec98b0edd68b73 +size 108491578 diff --git 
a/body_models/human_model_files/smplx/SMPLX_MALE.pkl b/body_models/human_model_files/smplx/SMPLX_MALE.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e07266dd7a640e7cba17f21bb40c66034f0f1b65 --- /dev/null +++ b/body_models/human_model_files/smplx/SMPLX_MALE.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d4f94c40261ac4762bb9b09142d11bf47e1cc3d6b49b6bbcc4a2731451bf5632 +size 543102085 diff --git a/body_models/human_model_files/smplx/SMPLX_NEUTRAL.npz b/body_models/human_model_files/smplx/SMPLX_NEUTRAL.npz new file mode 100644 index 0000000000000000000000000000000000000000..34c8d8f151af5a02c8c8b024e3783e2e51554e19 --- /dev/null +++ b/body_models/human_model_files/smplx/SMPLX_NEUTRAL.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15eb61ac2f91dcd6e340913e281b2b8a0a910ebe0955af9251b9bb99fd11d02b +size 108490191 diff --git a/body_models/human_model_files/smplx/SMPLX_NEUTRAL.pkl b/body_models/human_model_files/smplx/SMPLX_NEUTRAL.pkl new file mode 100644 index 0000000000000000000000000000000000000000..6bf7f3e5361aabd2086026b1db6883eef0abed5a --- /dev/null +++ b/body_models/human_model_files/smplx/SMPLX_NEUTRAL.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b0279321ea9bd3cec5541c03b1f1c9ab9d197896943035c3abeef47f699bc5e +size 542798306 diff --git a/body_models/human_model_files/smplx/SMPLX_NEUTRAL_NEW.npy b/body_models/human_model_files/smplx/SMPLX_NEUTRAL_NEW.npy new file mode 100644 index 0000000000000000000000000000000000000000..a59a6de8926b39175c4d9bae8478d7cf8e564eca --- /dev/null +++ b/body_models/human_model_files/smplx/SMPLX_NEUTRAL_NEW.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:248e277858008fea271d1ea3874eed2310dfd57fa160ea07c467cf6a061e0ecd +size 167260951 diff --git a/body_models/human_model_files/smplx/SMPLX_NEUTRAL_NEW.npz b/body_models/human_model_files/smplx/SMPLX_NEUTRAL_NEW.npz new file mode 100644 index 0000000000000000000000000000000000000000..59183bf3d42a5b3c3f973b249c11a06892a93bff --- /dev/null +++ b/body_models/human_model_files/smplx/SMPLX_NEUTRAL_NEW.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ecb628fadd2b40f42cd39378d1e429cd30acc0bab6104676898d4374b804163d +size 167261087 diff --git a/body_models/human_model_files/smplx/SMPLX_NEUTRAL_NEW_WiFlame.npy b/body_models/human_model_files/smplx/SMPLX_NEUTRAL_NEW_WiFlame.npy new file mode 100644 index 0000000000000000000000000000000000000000..fb92f91bb815cf1ebad83d942ce5812ef7cf85c4 --- /dev/null +++ b/body_models/human_model_files/smplx/SMPLX_NEUTRAL_NEW_WiFlame.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9047e853fc08caa5cef648aa691bf80cf423ca5f0693d825c029a6a7b0bedc51 +size 215482118 diff --git a/body_models/human_model_files/smplx/SMPLX_NEUTRAL_WiFlame.npy b/body_models/human_model_files/smplx/SMPLX_NEUTRAL_WiFlame.npy new file mode 100644 index 0000000000000000000000000000000000000000..eda9125a67aad196999be71a717b9d6eed3e21f3 --- /dev/null +++ b/body_models/human_model_files/smplx/SMPLX_NEUTRAL_WiFlame.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4de3b53501fb7b3acdb64533eac9088b4cf39710fa8dfe39dfb67d193968ff31 +size 215482118 diff --git a/body_models/human_model_files/smplx/SMPLX_to_J14.pkl b/body_models/human_model_files/smplx/SMPLX_to_J14.pkl new file mode 100644 index 0000000000000000000000000000000000000000..db8aa5c74b860a2b9555383d5ca2a09523851fe4 --- /dev/null +++ 
b/body_models/human_model_files/smplx/SMPLX_to_J14.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5df844ddea85b0a400a2e8dbe63d09d19f2b1b7ec0e0e952daeae08f83d82d61 +size 4692193 diff --git a/body_models/human_model_files/smplx/smplx_kid_template.npy b/body_models/human_model_files/smplx/smplx_kid_template.npy new file mode 100644 index 0000000000000000000000000000000000000000..8ce7bc403545dfb29f361787cb7bca1df8316d6e --- /dev/null +++ b/body_models/human_model_files/smplx/smplx_kid_template.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bdce4f5886b9ddcb6da3ee0f70ae636b1aa1292f2b379c4c3149fce8abc0a604 +size 251528 diff --git a/body_models/human_model_files/smplx/smplx_parts_segm.pkl b/body_models/human_model_files/smplx/smplx_parts_segm.pkl new file mode 100644 index 0000000000000000000000000000000000000000..77ce98631741ba3887d689077baf35422d39299d --- /dev/null +++ b/body_models/human_model_files/smplx/smplx_parts_segm.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bb69c10801205c9cfb5353fdeb1b9cc5ade53d14c265c3339421cdde8b9c91e7 +size 1323168 diff --git a/body_models/human_model_files/smplx/smplx_vert_segmentation.json b/body_models/human_model_files/smplx/smplx_vert_segmentation.json new file mode 100644 index 0000000000000000000000000000000000000000..cb3adb8e6668a67f77e9d163807392ff991e7821 --- /dev/null +++ b/body_models/human_model_files/smplx/smplx_vert_segmentation.json @@ -0,0 +1,12027 @@ +{ + "rightHand": [ + 7333, + 7334, + 7331, + 7332, + 7338, + 7335, + 7336, + 7337, + 7340, + 7339, + 7342, + 7343, + 7341, + 7346, + 7344, + 7345, + 7347, + 7348, + 7349, + 7350, + 7351, + 7352, + 7353, + 7354, + 7357, + 7358, + 7355, + 7356, + 7361, + 7362, + 7359, + 7360, + 7365, + 7366, + 7363, + 7364, + 7370, + 7367, + 7368, + 7369, + 7373, + 7371, + 7372, + 7374, + 7376, + 7375, + 7383, + 7384, + 7381, + 7382, + 7385, + 7386, + 7392, + 7391, + 7395, + 7396, + 7393, + 7394, + 7399, + 7400, + 7397, + 7398, + 7403, + 7404, + 7401, + 7402, + 7407, + 7408, + 7405, + 7406, + 7409, + 7410, + 7413, + 7414, + 7411, + 7412, + 7415, + 7416, + 7419, + 7420, + 7421, + 7423, + 7422, + 7424, + 7425, + 7426, + 7429, + 7430, + 7427, + 7428, + 7433, + 7434, + 7431, + 7432, + 7435, + 7436, + 7438, + 7437, + 7439, + 7442, + 7443, + 7440, + 7441, + 7444, + 7445, + 7446, + 7447, + 7449, + 7448, + 7450, + 7451, + 7456, + 7459, + 7465, + 7466, + 7463, + 7464, + 7469, + 7467, + 7468, + 7470, + 7471, + 7472, + 7479, + 7480, + 7481, + 7482, + 7485, + 7483, + 7484, + 7487, + 7488, + 7486, + 7491, + 7492, + 7489, + 7490, + 7493, + 7494, + 7499, + 7500, + 7501, + 7504, + 7505, + 7512, + 7513, + 7514, + 7520, + 7521, + 7522, + 7523, + 7524, + 7525, + 7526, + 7528, + 7529, + 7527, + 7530, + 7532, + 7533, + 7534, + 7535, + 7540, + 7541, + 7539, + 7542, + 7544, + 7543, + 7546, + 7547, + 7545, + 7548, + 7549, + 7550, + 7551, + 7552, + 7553, + 7556, + 7558, + 7560, + 7561, + 7562, + 7563, + 7564, + 7571, + 7572, + 7573, + 7574, + 7575, + 7576, + 7577, + 7578, + 7579, + 7581, + 7585, + 7586, + 7588, + 7587, + 7590, + 7589, + 7596, + 7597, + 7598, + 7599, + 7602, + 7603, + 7600, + 7601, + 7604, + 7605, + 7607, + 7606, + 7608, + 7609, + 7610, + 7613, + 7612, + 7614, + 7615, + 7616, + 7617, + 7618, + 7619, + 7620, + 7621, + 7624, + 7625, + 7626, + 7627, + 7628, + 7629, + 7634, + 7635, + 7638, + 7637, + 7639, + 7640, + 7643, + 7947, + 7948, + 7957, + 7958, + 8047, + 8049, + 8048, + 8050, + 8051, + 8052, + 8053, + 8054, + 8055, + 8056, + 8057, + 
8058, + 8059, + 8060, + 8061, + 8062, + 8063, + 8064, + 8065, + 8066, + 8067, + 8070, + 8071, + 8068, + 8069, + 8073, + 8072, + 8075, + 8074, + 8076, + 8077, + 8078, + 8080, + 8079, + 8081, + 8082, + 8083, + 8084, + 8085, + 8086, + 8088, + 8087, + 8089, + 8091, + 8090, + 8092, + 8093, + 8094, + 8095, + 8096, + 8097, + 8099, + 8098, + 8100, + 8101, + 8103, + 8102, + 8104, + 8105, + 8106, + 8107, + 8108, + 8109, + 8110, + 8111, + 8113, + 8112, + 8114, + 8115, + 8116, + 8119, + 8117, + 8118, + 8120, + 8121, + 8122, + 8123, + 8124, + 8125, + 8126, + 8127, + 8128 + ], + "rightUpLeg": [ + 6225, + 6226, + 6228, + 6229, + 6241, + 6238, + 6239, + 6240, + 6242, + 6243, + 6244, + 6245, + 6261, + 6262, + 6263, + 6264, + 6265, + 6266, + 6267, + 6268, + 6271, + 6272, + 6269, + 6270, + 6288, + 6289, + 6290, + 6291, + 6294, + 6292, + 6293, + 6295, + 6296, + 6297, + 6298, + 6299, + 6300, + 6301, + 6302, + 6303, + 6304, + 6305, + 6306, + 6324, + 6325, + 6326, + 6327, + 6335, + 6339, + 6336, + 6337, + 6338, + 6340, + 6341, + 6342, + 6343, + 6344, + 6345, + 6346, + 6347, + 6348, + 6349, + 6350, + 6351, + 6352, + 6353, + 6354, + 6355, + 6356, + 6357, + 6358, + 6359, + 6360, + 6361, + 6362, + 6363, + 6365, + 6364, + 6366, + 6367, + 6368, + 6369, + 6370, + 6371, + 6372, + 6373, + 6374, + 6375, + 6376, + 6377, + 6378, + 6379, + 6380, + 6381, + 6382, + 6383, + 6384, + 6385, + 6386, + 6387, + 6388, + 6389, + 6390, + 6391, + 6392, + 6393, + 6394, + 6395, + 6396, + 6397, + 6398, + 6399, + 6400, + 6401, + 6402, + 6403, + 6407, + 6404, + 6405, + 6406, + 6408, + 6409, + 6410, + 6411, + 6412, + 6413, + 6414, + 6415, + 6416, + 6417, + 6418, + 6419, + 6423, + 6420, + 6421, + 6422, + 6424, + 6425, + 6426, + 6427, + 6428, + 6429, + 6430, + 6431, + 6432, + 6433, + 6437, + 6434, + 6435, + 6436, + 6528, + 6529, + 6530, + 6531, + 6533, + 6532, + 6534, + 6535, + 6536, + 6537, + 6538, + 6539, + 6550, + 6551, + 6552, + 6553, + 6556, + 6557, + 6554, + 6555, + 6558, + 6559, + 6560, + 6561, + 6565, + 6562, + 6563, + 6564, + 6575, + 6576, + 6577, + 6578, + 6611, + 6609, + 6610, + 6612, + 6613, + 6614, + 6615, + 6616, + 6617, + 6618, + 6650, + 6651, + 6662, + 6663, + 6664, + 6665, + 6706, + 6707, + 6734, + 6739, + 6740, + 6741, + 6742, + 6743, + 6744, + 6745, + 6746, + 6829, + 6830, + 6831, + 6833, + 6834, + 6835, + 6836, + 6837, + 6838, + 6839, + 6840, + 6841, + 6853, + 6854, + 6855, + 6856, + 6857, + 6875, + 6876, + 6877, + 6878, + 6888, + 6889, + 6890, + 6891, + 6892, + 6893, + 6894, + 6895, + 6896, + 6897, + 6898, + 6909, + 6910, + 8394, + 8395, + 8396, + 8397, + 8400, + 8401, + 8402, + 8403, + 8404, + 8721, + 8725 + ], + "leftArm": [ + 3256, + 3259, + 3258, + 3257, + 3267, + 3266, + 3312, + 3311, + 3347, + 3346, + 3349, + 3348, + 3401, + 3404, + 3403, + 3402, + 3407, + 3406, + 3405, + 3408, + 3409, + 3412, + 3411, + 3410, + 3419, + 3418, + 3417, + 3416, + 3421, + 3420, + 3422, + 3425, + 3424, + 3423, + 3868, + 3871, + 3870, + 3869, + 3898, + 3901, + 3900, + 3899, + 3912, + 3921, + 3920, + 3948, + 3947, + 3952, + 3951, + 3976, + 3975, + 3974, + 3973, + 3989, + 3988, + 3987, + 3990, + 4007, + 4010, + 4009, + 4008, + 4011, + 4014, + 4013, + 4012, + 4015, + 4017, + 4016, + 4018, + 4021, + 4020, + 4019, + 4022, + 4025, + 4024, + 4023, + 4026, + 4029, + 4028, + 4027, + 4031, + 4030, + 4035, + 4034, + 4036, + 4039, + 4038, + 4037, + 4040, + 4044, + 4043, + 4042, + 4046, + 4045, + 4047, + 4048, + 4061, + 4060, + 4067, + 4064, + 4063, + 4062, + 4072, + 4075, + 4074, + 4073, + 4079, + 4078, + 4077, + 4076, + 4135, + 4139, + 4138, + 4140, + 
4141, + 4143, + 4142, + 4170, + 4171, + 4172, + 4173, + 4174, + 4249, + 4252, + 4251, + 4250, + 4261, + 4264, + 4263, + 4262, + 4268, + 4267, + 4266, + 4265, + 4269, + 4272, + 4271, + 4270, + 4275, + 4278, + 4277, + 4276, + 4281, + 4284, + 4283, + 4282, + 4285, + 4288, + 4287, + 4286, + 4290, + 4289, + 4296, + 4295, + 4301, + 4302, + 4303, + 4306, + 4305, + 4304, + 4307, + 4308, + 4310, + 4309, + 4311, + 4314, + 4313, + 4312, + 4316, + 4315, + 4318, + 4317, + 4322, + 4319, + 4334, + 4336, + 4335, + 4341, + 4344, + 4343, + 4342, + 4346, + 4345, + 4350, + 4349, + 4348, + 4347, + 4351, + 4353, + 4352, + 4354, + 4355, + 4358, + 4357, + 4356, + 4363, + 4369, + 4371, + 4370, + 4372, + 4373, + 4375, + 4378, + 4377, + 4385, + 4384, + 4383, + 4387, + 4386, + 4389, + 4398, + 4450, + 4449, + 4460, + 4465, + 4464, + 4471, + 4470, + 4475, + 4474, + 4476, + 4478, + 4484, + 4483, + 4485, + 4487, + 4488, + 4489, + 4492, + 4493, + 4495, + 4494, + 4496, + 4501, + 4500, + 4507, + 4506, + 4510, + 4518, + 4520, + 4519, + 4521, + 4522, + 4523, + 5398, + 5397, + 5399, + 5400, + 5471, + 5473, + 5472, + 5474, + 5475, + 5476, + 5477, + 5478, + 5479, + 5542, + 5543, + 5576, + 5573, + 5572, + 5577, + 5578, + 5580, + 5579, + 5581, + 5582, + 5583, + 5584, + 5587, + 5586, + 5585, + 5588, + 5589, + 5590, + 5591, + 5592, + 5593, + 5594, + 5595, + 5597, + 5607, + 5628 + ], + "head": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 74, + 75, + 76, + 77, + 78, + 79, + 80, + 81, + 82, + 83, + 84, + 85, + 86, + 87, + 88, + 89, + 90, + 91, + 92, + 93, + 94, + 95, + 96, + 97, + 98, + 99, + 100, + 101, + 102, + 103, + 104, + 105, + 106, + 107, + 108, + 109, + 110, + 111, + 112, + 113, + 114, + 115, + 116, + 117, + 118, + 119, + 120, + 121, + 122, + 123, + 124, + 125, + 126, + 127, + 128, + 129, + 130, + 131, + 132, + 133, + 134, + 135, + 136, + 137, + 138, + 139, + 140, + 141, + 142, + 143, + 144, + 145, + 146, + 147, + 148, + 149, + 150, + 151, + 152, + 153, + 154, + 155, + 156, + 157, + 158, + 159, + 160, + 161, + 162, + 163, + 164, + 165, + 166, + 167, + 168, + 169, + 170, + 171, + 172, + 173, + 174, + 175, + 176, + 177, + 178, + 179, + 180, + 181, + 182, + 183, + 184, + 185, + 186, + 187, + 188, + 189, + 190, + 191, + 192, + 193, + 194, + 195, + 196, + 197, + 198, + 199, + 200, + 201, + 202, + 203, + 204, + 205, + 206, + 207, + 208, + 209, + 210, + 211, + 212, + 213, + 214, + 215, + 216, + 217, + 218, + 223, + 224, + 225, + 226, + 227, + 228, + 229, + 230, + 231, + 232, + 233, + 234, + 235, + 236, + 237, + 238, + 239, + 240, + 241, + 242, + 243, + 244, + 245, + 246, + 247, + 248, + 249, + 250, + 251, + 252, + 253, + 254, + 255, + 256, + 257, + 258, + 259, + 260, + 261, + 262, + 263, + 264, + 265, + 266, + 267, + 268, + 269, + 270, + 271, + 272, + 273, + 274, + 275, + 276, + 277, + 278, + 279, + 280, + 281, + 282, + 283, + 284, + 285, + 286, + 287, + 288, + 289, + 290, + 291, + 292, + 293, + 294, + 295, + 296, + 297, + 298, + 299, + 300, + 301, + 302, + 303, + 304, + 305, + 306, + 307, + 308, + 309, + 310, + 311, + 312, + 313, + 314, + 315, + 316, + 317, + 318, + 319, + 320, + 321, + 322, + 323, + 324, + 325, + 326, + 327, + 328, + 329, + 330, + 331, + 332, + 333, + 
334, + 335, + 336, + 337, + 338, + 339, + 340, + 341, + 342, + 343, + 344, + 345, + 346, + 347, + 348, + 349, + 350, + 351, + 352, + 353, + 354, + 355, + 356, + 357, + 358, + 359, + 360, + 361, + 362, + 363, + 364, + 365, + 366, + 367, + 368, + 369, + 370, + 371, + 376, + 377, + 378, + 379, + 380, + 381, + 382, + 383, + 384, + 385, + 386, + 387, + 388, + 389, + 390, + 391, + 392, + 393, + 394, + 395, + 396, + 397, + 398, + 399, + 400, + 401, + 402, + 403, + 404, + 405, + 406, + 407, + 408, + 409, + 410, + 411, + 412, + 413, + 414, + 415, + 416, + 417, + 418, + 419, + 420, + 421, + 422, + 423, + 424, + 425, + 426, + 427, + 428, + 429, + 430, + 431, + 432, + 433, + 434, + 435, + 436, + 437, + 438, + 439, + 440, + 441, + 442, + 443, + 444, + 445, + 446, + 447, + 448, + 449, + 450, + 451, + 452, + 453, + 454, + 455, + 456, + 457, + 458, + 459, + 460, + 461, + 464, + 465, + 466, + 467, + 468, + 469, + 470, + 471, + 472, + 473, + 474, + 475, + 476, + 477, + 478, + 479, + 480, + 481, + 482, + 483, + 484, + 485, + 486, + 487, + 488, + 489, + 490, + 491, + 492, + 493, + 494, + 495, + 498, + 499, + 500, + 501, + 502, + 503, + 504, + 505, + 506, + 507, + 508, + 509, + 510, + 511, + 512, + 513, + 514, + 515, + 516, + 517, + 518, + 519, + 520, + 521, + 522, + 523, + 524, + 525, + 526, + 527, + 528, + 529, + 530, + 531, + 532, + 533, + 534, + 535, + 536, + 537, + 538, + 539, + 540, + 541, + 542, + 543, + 544, + 545, + 546, + 547, + 548, + 549, + 550, + 551, + 554, + 555, + 556, + 557, + 560, + 561, + 562, + 565, + 566, + 567, + 568, + 569, + 570, + 571, + 572, + 573, + 574, + 575, + 576, + 577, + 578, + 579, + 580, + 581, + 582, + 583, + 584, + 585, + 586, + 587, + 588, + 589, + 590, + 591, + 592, + 593, + 594, + 595, + 596, + 597, + 598, + 599, + 600, + 601, + 602, + 603, + 604, + 605, + 606, + 607, + 608, + 609, + 610, + 611, + 612, + 613, + 614, + 615, + 616, + 617, + 618, + 619, + 620, + 621, + 622, + 623, + 624, + 625, + 626, + 627, + 628, + 629, + 630, + 631, + 632, + 633, + 634, + 635, + 636, + 637, + 638, + 639, + 640, + 641, + 642, + 643, + 644, + 645, + 646, + 647, + 648, + 651, + 652, + 653, + 654, + 655, + 656, + 657, + 658, + 659, + 660, + 661, + 662, + 663, + 664, + 665, + 666, + 667, + 668, + 669, + 670, + 671, + 672, + 673, + 674, + 675, + 676, + 677, + 678, + 679, + 680, + 681, + 682, + 683, + 684, + 685, + 686, + 687, + 688, + 689, + 690, + 691, + 692, + 693, + 694, + 695, + 696, + 697, + 698, + 699, + 700, + 701, + 702, + 703, + 704, + 705, + 706, + 707, + 708, + 709, + 710, + 711, + 712, + 713, + 714, + 715, + 716, + 717, + 718, + 719, + 720, + 721, + 722, + 723, + 724, + 725, + 726, + 727, + 728, + 729, + 730, + 731, + 732, + 733, + 734, + 735, + 738, + 739, + 740, + 741, + 742, + 743, + 744, + 745, + 746, + 747, + 748, + 749, + 750, + 751, + 752, + 753, + 754, + 755, + 756, + 757, + 758, + 759, + 760, + 761, + 762, + 763, + 764, + 765, + 766, + 767, + 768, + 769, + 770, + 771, + 772, + 773, + 774, + 775, + 776, + 777, + 778, + 779, + 780, + 781, + 782, + 783, + 784, + 785, + 786, + 787, + 788, + 789, + 790, + 791, + 792, + 793, + 794, + 795, + 796, + 797, + 798, + 799, + 800, + 801, + 802, + 803, + 804, + 805, + 806, + 807, + 808, + 809, + 810, + 811, + 812, + 813, + 814, + 815, + 816, + 817, + 818, + 819, + 820, + 821, + 822, + 823, + 824, + 825, + 826, + 827, + 828, + 829, + 830, + 831, + 832, + 833, + 834, + 835, + 836, + 837, + 838, + 839, + 840, + 841, + 842, + 843, + 844, + 845, + 846, + 847, + 848, + 849, + 850, + 851, + 852, + 853, + 854, + 855, + 856, + 857, + 858, + 859, 
+ 860, + 861, + 862, + 863, + 864, + 865, + 866, + 867, + 868, + 869, + 870, + 871, + 872, + 873, + 874, + 875, + 876, + 877, + 878, + 879, + 880, + 881, + 882, + 883, + 884, + 885, + 886, + 887, + 888, + 889, + 890, + 891, + 892, + 893, + 894, + 895, + 896, + 897, + 898, + 899, + 900, + 901, + 902, + 903, + 904, + 905, + 906, + 907, + 908, + 909, + 910, + 911, + 912, + 913, + 914, + 915, + 916, + 917, + 918, + 919, + 920, + 921, + 922, + 923, + 924, + 925, + 926, + 927, + 928, + 929, + 930, + 931, + 932, + 933, + 934, + 935, + 936, + 937, + 938, + 939, + 940, + 941, + 942, + 943, + 944, + 945, + 946, + 947, + 948, + 949, + 950, + 951, + 952, + 953, + 954, + 955, + 956, + 957, + 958, + 959, + 960, + 961, + 962, + 963, + 964, + 965, + 966, + 967, + 968, + 969, + 970, + 971, + 972, + 973, + 974, + 975, + 976, + 977, + 978, + 979, + 980, + 981, + 982, + 983, + 984, + 985, + 986, + 987, + 988, + 989, + 990, + 991, + 992, + 993, + 994, + 995, + 996, + 997, + 998, + 999, + 1000, + 1001, + 1002, + 1003, + 1004, + 1005, + 1006, + 1007, + 1008, + 1009, + 1010, + 1011, + 1012, + 1013, + 1014, + 1015, + 1016, + 1017, + 1018, + 1019, + 1020, + 1021, + 1022, + 1023, + 1024, + 1025, + 1026, + 1027, + 1028, + 1029, + 1030, + 1031, + 1032, + 1033, + 1034, + 1035, + 1036, + 1037, + 1038, + 1039, + 1040, + 1041, + 1042, + 1043, + 1044, + 1045, + 1046, + 1047, + 1048, + 1049, + 1050, + 1051, + 1052, + 1053, + 1054, + 1055, + 1056, + 1057, + 1058, + 1059, + 1060, + 1061, + 1062, + 1063, + 1064, + 1065, + 1066, + 1067, + 1068, + 1069, + 1070, + 1071, + 1072, + 1073, + 1074, + 1075, + 1076, + 1077, + 1078, + 1079, + 1080, + 1081, + 1082, + 1083, + 1084, + 1085, + 1086, + 1087, + 1088, + 1089, + 1090, + 1091, + 1092, + 1093, + 1094, + 1095, + 1096, + 1097, + 1098, + 1099, + 1100, + 1101, + 1102, + 1103, + 1104, + 1105, + 1106, + 1107, + 1108, + 1109, + 1110, + 1111, + 1112, + 1113, + 1114, + 1115, + 1116, + 1117, + 1118, + 1119, + 1120, + 1121, + 1122, + 1123, + 1124, + 1125, + 1126, + 1127, + 1128, + 1129, + 1130, + 1131, + 1132, + 1133, + 1134, + 1135, + 1136, + 1137, + 1138, + 1139, + 1140, + 1141, + 1142, + 1143, + 1144, + 1145, + 1146, + 1147, + 1148, + 1149, + 1150, + 1151, + 1152, + 1153, + 1154, + 1155, + 1156, + 1157, + 1158, + 1159, + 1160, + 1161, + 1162, + 1163, + 1164, + 1165, + 1166, + 1167, + 1168, + 1169, + 1170, + 1171, + 1172, + 1173, + 1174, + 1175, + 1176, + 1177, + 1178, + 1179, + 1180, + 1181, + 1182, + 1183, + 1184, + 1185, + 1186, + 1187, + 1188, + 1189, + 1190, + 1191, + 1192, + 1193, + 1194, + 1195, + 1196, + 1197, + 1198, + 1199, + 1200, + 1201, + 1202, + 1203, + 1204, + 1205, + 1206, + 1207, + 1208, + 1209, + 1214, + 1215, + 1216, + 1217, + 1218, + 1219, + 1220, + 1221, + 1222, + 1223, + 1224, + 1225, + 1226, + 1227, + 1228, + 1229, + 1230, + 1231, + 1232, + 1233, + 1234, + 1235, + 1236, + 1237, + 1238, + 1239, + 1240, + 1241, + 1242, + 1243, + 1244, + 1245, + 1246, + 1247, + 1248, + 1249, + 1250, + 1251, + 1252, + 1253, + 1254, + 1255, + 1256, + 1257, + 1258, + 1259, + 1260, + 1261, + 1262, + 1263, + 1264, + 1265, + 1266, + 1267, + 1268, + 1269, + 1270, + 1271, + 1272, + 1273, + 1274, + 1275, + 1276, + 1277, + 1278, + 1279, + 1280, + 1281, + 1282, + 1283, + 1284, + 1285, + 1286, + 1287, + 1288, + 1289, + 1290, + 1291, + 1292, + 1293, + 1294, + 1295, + 1296, + 1297, + 1298, + 1299, + 1300, + 1301, + 1302, + 1303, + 1304, + 1305, + 1306, + 1307, + 1308, + 1309, + 1310, + 1311, + 1312, + 1313, + 1314, + 1315, + 1316, + 1317, + 1318, + 1319, + 1320, + 1321, + 1322, + 1323, + 1324, + 
1325, + 1327, + 1328, + 1329, + 1330, + 1331, + 1332, + 1333, + 1334, + 1335, + 1336, + 1337, + 1338, + 1339, + 1340, + 1341, + 1342, + 1343, + 1344, + 1345, + 1346, + 1347, + 1348, + 1349, + 1350, + 1351, + 1352, + 1353, + 1354, + 1355, + 1356, + 1357, + 1358, + 1361, + 1362, + 1363, + 1364, + 1365, + 1366, + 1367, + 1368, + 1369, + 1370, + 1371, + 1372, + 1373, + 1374, + 1375, + 1376, + 1377, + 1378, + 1379, + 1380, + 1381, + 1382, + 1383, + 1384, + 1385, + 1387, + 1388, + 1389, + 1390, + 1391, + 1392, + 1393, + 1394, + 1395, + 1396, + 1397, + 1398, + 1399, + 1400, + 1401, + 1402, + 1403, + 1404, + 1405, + 1406, + 1407, + 1408, + 1409, + 1410, + 1411, + 1412, + 1413, + 1414, + 1415, + 1416, + 1417, + 1418, + 1419, + 1420, + 1421, + 1422, + 1423, + 1424, + 1425, + 1426, + 1427, + 1428, + 1429, + 1430, + 1431, + 1432, + 1433, + 1434, + 1435, + 1436, + 1437, + 1438, + 1439, + 1440, + 1441, + 1442, + 1443, + 1444, + 1445, + 1446, + 1447, + 1448, + 1449, + 1450, + 1451, + 1452, + 1453, + 1454, + 1455, + 1456, + 1457, + 1458, + 1459, + 1460, + 1461, + 1462, + 1463, + 1464, + 1465, + 1466, + 1467, + 1468, + 1469, + 1470, + 1471, + 1472, + 1473, + 1474, + 1475, + 1476, + 1477, + 1478, + 1479, + 1480, + 1481, + 1482, + 1483, + 1484, + 1485, + 1486, + 1487, + 1488, + 1489, + 1490, + 1491, + 1492, + 1493, + 1494, + 1495, + 1496, + 1497, + 1498, + 1499, + 1500, + 1501, + 1502, + 1503, + 1504, + 1505, + 1506, + 1507, + 1508, + 1509, + 1510, + 1511, + 1512, + 1513, + 1514, + 1515, + 1516, + 1517, + 1518, + 1519, + 1520, + 1521, + 1522, + 1523, + 1524, + 1525, + 1526, + 1527, + 1528, + 1529, + 1530, + 1531, + 1532, + 1533, + 1534, + 1535, + 1536, + 1537, + 1538, + 1539, + 1540, + 1541, + 1542, + 1543, + 1544, + 1545, + 1546, + 1547, + 1548, + 1549, + 1550, + 1551, + 1552, + 1553, + 1554, + 1555, + 1556, + 1557, + 1558, + 1559, + 1560, + 1561, + 1562, + 1563, + 1564, + 1565, + 1566, + 1567, + 1568, + 1569, + 1570, + 1571, + 1572, + 1573, + 1574, + 1575, + 1576, + 1577, + 1578, + 1579, + 1580, + 1581, + 1582, + 1583, + 1584, + 1585, + 1586, + 1587, + 1588, + 1589, + 1590, + 1591, + 1592, + 1593, + 1594, + 1595, + 1596, + 1597, + 1598, + 1599, + 1600, + 1601, + 1602, + 1603, + 1604, + 1605, + 1606, + 1607, + 1608, + 1609, + 1610, + 1611, + 1612, + 1613, + 1614, + 1615, + 1616, + 1617, + 1618, + 1619, + 1620, + 1621, + 1622, + 1623, + 1624, + 1625, + 1626, + 1627, + 1628, + 1629, + 1630, + 1631, + 1632, + 1633, + 1634, + 1635, + 1636, + 1637, + 1638, + 1639, + 1640, + 1641, + 1642, + 1643, + 1644, + 1645, + 1646, + 1647, + 1648, + 1649, + 1650, + 1651, + 1652, + 1653, + 1654, + 1655, + 1656, + 1657, + 1658, + 1659, + 1660, + 1661, + 1662, + 1663, + 1664, + 1665, + 1666, + 1667, + 1668, + 1669, + 1670, + 1671, + 1672, + 1673, + 1674, + 1675, + 1676, + 1677, + 1678, + 1679, + 1680, + 1681, + 1682, + 1683, + 1684, + 1685, + 1686, + 1687, + 1688, + 1689, + 1690, + 1691, + 1692, + 1693, + 1694, + 1695, + 1696, + 1697, + 1698, + 1699, + 1700, + 1701, + 1702, + 1703, + 1704, + 1705, + 1706, + 1707, + 1708, + 1709, + 1710, + 1711, + 1712, + 1713, + 1714, + 1715, + 1716, + 1717, + 1718, + 1719, + 1720, + 1721, + 1722, + 1723, + 1724, + 1725, + 1728, + 1729, + 1730, + 1731, + 1732, + 1733, + 1734, + 1735, + 1736, + 1737, + 1738, + 1739, + 1740, + 1741, + 1742, + 1743, + 1744, + 1745, + 1746, + 1747, + 1748, + 1749, + 1750, + 1751, + 1752, + 1753, + 1754, + 1755, + 1756, + 1757, + 1758, + 1760, + 1761, + 1762, + 1763, + 1764, + 1765, + 1766, + 1767, + 1768, + 1769, + 1770, + 1771, + 1772, + 1773, + 1774, + 1775, + 
1776, + 1777, + 1778, + 1779, + 1780, + 1781, + 1782, + 1783, + 1784, + 1785, + 1786, + 1787, + 1788, + 1789, + 1791, + 1792, + 1793, + 1794, + 1795, + 1796, + 1797, + 1798, + 1799, + 1800, + 1801, + 1802, + 1803, + 1804, + 1805, + 1806, + 1807, + 1808, + 1809, + 1810, + 1811, + 1812, + 1813, + 1814, + 1815, + 1816, + 1817, + 1818, + 1819, + 1820, + 1821, + 1822, + 1823, + 1824, + 1825, + 1826, + 1827, + 1828, + 1829, + 1830, + 1831, + 1832, + 1833, + 1834, + 1835, + 1836, + 1837, + 1838, + 1839, + 1840, + 1841, + 1842, + 1843, + 1844, + 1845, + 1846, + 1847, + 1848, + 1849, + 1850, + 1851, + 1852, + 1853, + 1854, + 1855, + 1856, + 1857, + 1858, + 1859, + 1860, + 1861, + 1862, + 1863, + 1864, + 1865, + 1866, + 1867, + 1868, + 1869, + 1870, + 1871, + 1872, + 1873, + 1874, + 1875, + 1876, + 1877, + 1878, + 1879, + 1880, + 1881, + 1882, + 1883, + 1884, + 1885, + 1887, + 1888, + 1889, + 1890, + 1891, + 1892, + 1893, + 1894, + 1895, + 1896, + 1897, + 1899, + 1900, + 1901, + 1902, + 1903, + 1904, + 1905, + 1906, + 1907, + 1908, + 1909, + 1910, + 1911, + 1912, + 1913, + 1914, + 1915, + 1916, + 1917, + 1918, + 1919, + 1920, + 1921, + 1922, + 1923, + 1924, + 1925, + 1926, + 1927, + 1928, + 1929, + 1930, + 1935, + 1936, + 1937, + 1938, + 1939, + 1942, + 1943, + 1944, + 1945, + 1946, + 1947, + 1950, + 1951, + 1952, + 1953, + 1954, + 1955, + 1956, + 1957, + 1958, + 1959, + 1960, + 1961, + 1962, + 1963, + 1964, + 1965, + 1966, + 1967, + 1968, + 1969, + 1970, + 1971, + 1972, + 1973, + 1974, + 1975, + 1976, + 1977, + 1978, + 1979, + 1980, + 1981, + 1982, + 1983, + 1984, + 1985, + 1986, + 1987, + 1988, + 1989, + 1990, + 1991, + 1992, + 1993, + 1994, + 1995, + 1996, + 1997, + 1998, + 1999, + 2000, + 2001, + 2002, + 2003, + 2004, + 2005, + 2006, + 2007, + 2008, + 2009, + 2010, + 2011, + 2012, + 2013, + 2014, + 2015, + 2016, + 2017, + 2018, + 2019, + 2020, + 2021, + 2022, + 2023, + 2024, + 2025, + 2026, + 2027, + 2028, + 2029, + 2030, + 2031, + 2032, + 2033, + 2034, + 2035, + 2037, + 2038, + 2039, + 2040, + 2041, + 2042, + 2043, + 2044, + 2045, + 2046, + 2047, + 2048, + 2049, + 2050, + 2051, + 2052, + 2053, + 2054, + 2055, + 2056, + 2057, + 2058, + 2059, + 2060, + 2061, + 2062, + 2063, + 2064, + 2065, + 2066, + 2067, + 2068, + 2069, + 2070, + 2071, + 2072, + 2073, + 2074, + 2075, + 2076, + 2077, + 2078, + 2079, + 2080, + 2081, + 2082, + 2083, + 2084, + 2085, + 2086, + 2087, + 2088, + 2089, + 2090, + 2091, + 2092, + 2093, + 2094, + 2095, + 2096, + 2097, + 2098, + 2099, + 2100, + 2101, + 2102, + 2103, + 2104, + 2105, + 2106, + 2107, + 2108, + 2109, + 2110, + 2111, + 2112, + 2113, + 2114, + 2115, + 2116, + 2117, + 2118, + 2119, + 2120, + 2121, + 2122, + 2123, + 2124, + 2125, + 2126, + 2127, + 2128, + 2129, + 2130, + 2131, + 2132, + 2133, + 2134, + 2135, + 2136, + 2137, + 2138, + 2139, + 2140, + 2141, + 2142, + 2143, + 2144, + 2145, + 2146, + 2147, + 2148, + 2152, + 2153, + 2154, + 2155, + 2156, + 2157, + 2158, + 2159, + 2160, + 2161, + 2162, + 2163, + 2164, + 2165, + 2166, + 2167, + 2168, + 2169, + 2170, + 2171, + 2172, + 2173, + 2174, + 2175, + 2176, + 2177, + 2178, + 2179, + 2180, + 2181, + 2182, + 2183, + 2184, + 2185, + 2186, + 2187, + 2188, + 2189, + 2190, + 2191, + 2192, + 2193, + 2194, + 2195, + 2196, + 2197, + 2198, + 2199, + 2200, + 2201, + 2202, + 2203, + 2204, + 2205, + 2206, + 2207, + 2208, + 2209, + 2210, + 2211, + 2212, + 2213, + 2214, + 2215, + 2216, + 2217, + 2220, + 2221, + 2222, + 2223, + 2224, + 2225, + 2226, + 2227, + 2228, + 2229, + 2230, + 2231, + 2232, + 2233, + 2234, + 2235, + 2236, + 
2237, + 2238, + 2239, + 2240, + 2241, + 2242, + 2243, + 2244, + 2245, + 2246, + 2247, + 2248, + 2249, + 2250, + 2251, + 2252, + 2253, + 2254, + 2255, + 2256, + 2257, + 2258, + 2259, + 2260, + 2261, + 2262, + 2263, + 2264, + 2265, + 2266, + 2267, + 2268, + 2269, + 2270, + 2271, + 2272, + 2273, + 2274, + 2275, + 2276, + 2277, + 2278, + 2279, + 2280, + 2281, + 2282, + 2283, + 2284, + 2285, + 2286, + 2287, + 2288, + 2289, + 2290, + 2291, + 2292, + 2293, + 2294, + 2295, + 2296, + 2297, + 2298, + 2299, + 2300, + 2301, + 2302, + 2303, + 2304, + 2305, + 2306, + 2307, + 2308, + 2309, + 2310, + 2311, + 2312, + 2313, + 2314, + 2315, + 2316, + 2317, + 2318, + 2319, + 2320, + 2321, + 2322, + 2323, + 2324, + 2325, + 2326, + 2327, + 2328, + 2329, + 2330, + 2331, + 2332, + 2333, + 2334, + 2335, + 2336, + 2337, + 2338, + 2339, + 2340, + 2341, + 2342, + 2343, + 2344, + 2345, + 2346, + 2347, + 2348, + 2349, + 2350, + 2351, + 2352, + 2353, + 2354, + 2355, + 2356, + 2357, + 2358, + 2359, + 2360, + 2361, + 2362, + 2363, + 2364, + 2365, + 2366, + 2367, + 2368, + 2369, + 2370, + 2371, + 2372, + 2373, + 2374, + 2375, + 2376, + 2377, + 2378, + 2379, + 2380, + 2381, + 2382, + 2383, + 2384, + 2385, + 2386, + 2387, + 2388, + 2389, + 2390, + 2391, + 2392, + 2393, + 2394, + 2395, + 2396, + 2397, + 2398, + 2399, + 2400, + 2401, + 2402, + 2403, + 2404, + 2405, + 2406, + 2407, + 2408, + 2409, + 2410, + 2411, + 2412, + 2413, + 2414, + 2415, + 2416, + 2417, + 2418, + 2419, + 2420, + 2421, + 2422, + 2423, + 2424, + 2425, + 2426, + 2427, + 2428, + 2429, + 2430, + 2431, + 2432, + 2433, + 2434, + 2435, + 2436, + 2437, + 2438, + 2439, + 2440, + 2441, + 2442, + 2443, + 2444, + 2445, + 2446, + 2447, + 2448, + 2449, + 2450, + 2451, + 2452, + 2453, + 2454, + 2455, + 2456, + 2457, + 2458, + 2459, + 2460, + 2461, + 2462, + 2463, + 2464, + 2465, + 2466, + 2467, + 2468, + 2469, + 2470, + 2471, + 2472, + 2473, + 2474, + 2475, + 2476, + 2477, + 2478, + 2479, + 2480, + 2481, + 2482, + 2483, + 2485, + 2486, + 2487, + 2488, + 2489, + 2490, + 2491, + 2492, + 2493, + 2494, + 2495, + 2496, + 2497, + 2498, + 2499, + 2500, + 2501, + 2502, + 2503, + 2504, + 2505, + 2506, + 2507, + 2508, + 2509, + 2510, + 2511, + 2512, + 2513, + 2514, + 2515, + 2516, + 2517, + 2518, + 2519, + 2520, + 2521, + 2522, + 2523, + 2524, + 2525, + 2526, + 2527, + 2528, + 2529, + 2530, + 2532, + 2533, + 2534, + 2535, + 2536, + 2537, + 2538, + 2539, + 2540, + 2541, + 2542, + 2543, + 2544, + 2545, + 2546, + 2547, + 2548, + 2549, + 2550, + 2551, + 2552, + 2553, + 2554, + 2555, + 2556, + 2557, + 2558, + 2559, + 2560, + 2561, + 2562, + 2563, + 2564, + 2565, + 2566, + 2567, + 2568, + 2569, + 2570, + 2571, + 2572, + 2573, + 2574, + 2575, + 2576, + 2577, + 2578, + 2579, + 2580, + 2581, + 2582, + 2583, + 2584, + 2585, + 2586, + 2587, + 2588, + 2589, + 2590, + 2591, + 2592, + 2593, + 2594, + 2595, + 2596, + 2597, + 2598, + 2599, + 2600, + 2601, + 2602, + 2603, + 2604, + 2605, + 2606, + 2607, + 2608, + 2609, + 2610, + 2611, + 2612, + 2613, + 2614, + 2615, + 2616, + 2617, + 2618, + 2619, + 2620, + 2621, + 2622, + 2623, + 2624, + 2625, + 2626, + 2627, + 2628, + 2629, + 2630, + 2631, + 2632, + 2633, + 2634, + 2635, + 2636, + 2637, + 2638, + 2639, + 2640, + 2641, + 2642, + 2643, + 2644, + 2645, + 2646, + 2647, + 2648, + 2649, + 2650, + 2651, + 2652, + 2653, + 2654, + 2655, + 2656, + 2657, + 2658, + 2659, + 2660, + 2661, + 2662, + 2663, + 2664, + 2665, + 2666, + 2667, + 2668, + 2669, + 2670, + 2671, + 2672, + 2673, + 2674, + 2675, + 2676, + 2677, + 2678, + 2679, + 2680, + 2681, + 2682, + 
2683, + 2684, + 2685, + 2686, + 2687, + 2688, + 2689, + 2690, + 2691, + 2692, + 2693, + 2694, + 2695, + 2696, + 2697, + 2698, + 2699, + 2700, + 2701, + 2702, + 2703, + 2704, + 2705, + 2706, + 2707, + 2708, + 2709, + 2710, + 2711, + 2712, + 2713, + 2714, + 2715, + 2716, + 2717, + 2718, + 2719, + 2720, + 2721, + 2722, + 2723, + 2724, + 2725, + 2726, + 2727, + 2728, + 2729, + 2730, + 2731, + 2732, + 2733, + 2734, + 2735, + 2736, + 2737, + 2738, + 2739, + 2740, + 2741, + 2742, + 2743, + 2744, + 2745, + 2746, + 2747, + 2748, + 2749, + 2750, + 2751, + 2752, + 2753, + 2754, + 2755, + 2756, + 2757, + 2758, + 2759, + 2760, + 2761, + 2762, + 2763, + 2764, + 2765, + 2766, + 2767, + 2768, + 2769, + 2770, + 2771, + 2772, + 2773, + 2774, + 2775, + 2776, + 2777, + 2778, + 2779, + 2780, + 2781, + 2782, + 2783, + 2784, + 2785, + 2786, + 2787, + 2788, + 2789, + 2790, + 2791, + 2792, + 2793, + 2794, + 2795, + 2796, + 2797, + 2798, + 2799, + 2800, + 2801, + 2802, + 2803, + 2804, + 2805, + 2806, + 2807, + 2808, + 2809, + 2810, + 2811, + 2812, + 2813, + 2814, + 2815, + 2816, + 2817, + 2818, + 2819, + 2820, + 2821, + 2822, + 2823, + 2824, + 2825, + 2826, + 2827, + 2828, + 2829, + 2830, + 2831, + 2832, + 2833, + 2834, + 2835, + 2836, + 2837, + 2838, + 2839, + 2840, + 2841, + 2842, + 2843, + 2844, + 2845, + 2846, + 2847, + 2848, + 2849, + 2850, + 2851, + 2852, + 2853, + 2854, + 2855, + 2856, + 2857, + 2858, + 2859, + 2860, + 2861, + 2862, + 2863, + 2864, + 2865, + 2866, + 2867, + 2868, + 2869, + 2871, + 2872, + 2873, + 2874, + 2875, + 2876, + 2877, + 2878, + 2879, + 2880, + 2881, + 2882, + 2883, + 2884, + 2885, + 2886, + 2887, + 2888, + 2889, + 2890, + 2891, + 2892, + 2894, + 2895, + 2896, + 2897, + 2898, + 2899, + 2900, + 2901, + 2902, + 2903, + 2904, + 2905, + 2906, + 2907, + 2908, + 2909, + 2910, + 2911, + 2912, + 2913, + 2914, + 2915, + 2916, + 2917, + 2918, + 2919, + 2920, + 2921, + 2922, + 2923, + 2924, + 2925, + 2926, + 2927, + 2928, + 2929, + 2930, + 2931, + 2932, + 2933, + 2934, + 2935, + 2936, + 2937, + 2938, + 2939, + 2940, + 2941, + 2942, + 2943, + 2944, + 2945, + 2946, + 2947, + 2948, + 2949, + 2950, + 2951, + 2952, + 2953, + 2954, + 2955, + 2956, + 2957, + 2958, + 2959, + 2960, + 2961, + 2962, + 2963, + 2965, + 2966, + 2967, + 2968, + 2969, + 2970, + 2971, + 2972, + 2973, + 2974, + 2975, + 2977, + 2978, + 2979, + 2980, + 2981, + 2982, + 2983, + 2984, + 2985, + 2986, + 2987, + 2988, + 2989, + 2990, + 2991, + 2992, + 2993, + 2994, + 2995, + 2996, + 2997, + 2998, + 2999, + 3000, + 3001, + 3002, + 3003, + 3004, + 3005, + 3006, + 3007, + 3008, + 3009, + 3010, + 3011, + 3014, + 3015, + 3016, + 3017, + 3018, + 3019, + 3020, + 3021, + 3022, + 3023, + 3024, + 3025, + 3026, + 3027, + 3028, + 3029, + 3030, + 3031, + 3032, + 3033, + 3034, + 3035, + 3036, + 3037, + 3038, + 3039, + 3040, + 3041, + 3042, + 3043, + 3044, + 3045, + 3046, + 3047, + 3048, + 3049, + 3050, + 3051, + 3052, + 3053, + 3054, + 3055, + 3056, + 3057, + 3058, + 3059, + 3060, + 3061, + 3062, + 3063, + 3064, + 3065, + 3066, + 3067, + 3068, + 3069, + 3070, + 3071, + 3072, + 3073, + 3074, + 3075, + 3076, + 3077, + 3078, + 3079, + 3080, + 3081, + 3082, + 3083, + 3084, + 3085, + 3086, + 3087, + 3088, + 3089, + 3090, + 3091, + 3092, + 3093, + 3094, + 3095, + 3096, + 3097, + 3098, + 3099, + 3100, + 3101, + 3102, + 3103, + 3104, + 3105, + 3106, + 3107, + 3108, + 3109, + 3110, + 3111, + 3112, + 3113, + 3114, + 3115, + 3116, + 3117, + 3118, + 3119, + 3120, + 3121, + 3122, + 3123, + 3124, + 3125, + 3126, + 3127, + 3128, + 3129, + 3130, + 3131, + 3132, + 
3133, + 3134, + 3135, + 3136, + 3137, + 3138, + 3139, + 3140, + 3141, + 3142, + 3143, + 3144, + 3145, + 3146, + 3147, + 3148, + 3149, + 3150, + 3151, + 3152, + 3153, + 3154, + 3155, + 3156, + 3157, + 3158, + 3159, + 3160, + 3161, + 3162, + 3163, + 3164, + 3165, + 3166, + 3167, + 3168, + 3169, + 3170, + 3171, + 3172, + 3173, + 3174, + 3175, + 3176, + 3177, + 3178, + 3179, + 3180, + 3181, + 3182, + 3183, + 8731, + 8732, + 8733, + 8734, + 8735, + 8736, + 8737, + 8738, + 8739, + 8740, + 8741, + 8742, + 8743, + 8744, + 8745, + 8746, + 8747, + 8748, + 8749, + 8750, + 8751, + 8752, + 8753, + 8754, + 8755, + 8756, + 8757, + 8758, + 8759, + 8760, + 8761, + 8762, + 8763, + 8764, + 8765, + 8766, + 8767, + 8768, + 8769, + 8770, + 8771, + 8772, + 8773, + 8774, + 8775, + 8776, + 8777, + 8778, + 8779, + 8780, + 8781, + 8782, + 8783, + 8784, + 8785, + 8786, + 8787, + 8788, + 8789, + 8790, + 8791, + 8792, + 8793, + 8794, + 8795, + 8796, + 8797, + 8798, + 8799, + 8800, + 8801, + 8802, + 8803, + 8804, + 8805, + 8806, + 8807, + 8808, + 8809, + 8810, + 8815, + 8816, + 8817, + 8818, + 8819, + 8820, + 8821, + 8822, + 8823, + 8824, + 8825, + 8826, + 8827, + 8828, + 8829, + 8830, + 8831, + 8832, + 8833, + 8834, + 8835, + 8836, + 8837, + 8838, + 8926, + 8927, + 8928, + 8931, + 8932, + 8933, + 8939, + 8941, + 8942, + 8943, + 8944, + 8945, + 8946, + 8947, + 8948, + 8949, + 8950, + 8951, + 8952, + 8953, + 8954, + 8955, + 8956, + 8957, + 8958, + 8959, + 8960, + 8961, + 8962, + 8963, + 8964, + 8965, + 8966, + 8967, + 8968, + 8969, + 8970, + 8971, + 8972, + 8973, + 8974, + 8975, + 8976, + 8977, + 8978, + 8979, + 8980, + 8981, + 8982, + 8983, + 8984, + 8985, + 8986, + 8987, + 8989, + 8990, + 8991, + 8992, + 8993, + 8994, + 8995, + 8996, + 8997, + 8998, + 8999, + 9000, + 9001, + 9002, + 9003, + 9004, + 9005, + 9006, + 9007, + 9008, + 9009, + 9010, + 9011, + 9012, + 9013, + 9014, + 9015, + 9016, + 9017, + 9018, + 9019, + 9028, + 9029, + 9030, + 9031, + 9032, + 9033, + 9034, + 9035, + 9036, + 9037, + 9038, + 9039, + 9040, + 9041, + 9042, + 9043, + 9044, + 9045, + 9046, + 9047, + 9048, + 9049, + 9050, + 9051, + 9052, + 9053, + 9054, + 9055, + 9056, + 9057, + 9058, + 9059, + 9060, + 9061, + 9062, + 9063, + 9064, + 9065, + 9066, + 9067, + 9068, + 9069, + 9070, + 9071, + 9072, + 9073, + 9074, + 9075, + 9076, + 9077, + 9078, + 9079, + 9080, + 9081, + 9082, + 9083, + 9084, + 9085, + 9086, + 9087, + 9088, + 9089, + 9090, + 9091, + 9092, + 9093, + 9094, + 9095, + 9096, + 9097, + 9098, + 9099, + 9100, + 9101, + 9102, + 9103, + 9104, + 9105, + 9106, + 9107, + 9108, + 9109, + 9110, + 9111, + 9112, + 9113, + 9114, + 9115, + 9116, + 9117, + 9118, + 9119, + 9120, + 9121, + 9122, + 9123, + 9124, + 9125, + 9126, + 9127, + 9128, + 9129, + 9130, + 9131, + 9132, + 9133, + 9134, + 9135, + 9136, + 9137, + 9138, + 9139, + 9140, + 9141, + 9142, + 9143, + 9144, + 9145, + 9146, + 9147, + 9148, + 9149, + 9150, + 9151, + 9152, + 9153, + 9154, + 9155, + 9156, + 9157, + 9158, + 9159, + 9160, + 9162, + 9163, + 9164, + 9166, + 9167, + 9168, + 9169, + 9170, + 9171, + 9172, + 9173, + 9174, + 9175, + 9176, + 9177, + 9178, + 9179, + 9180, + 9181, + 9182, + 9183, + 9184, + 9185, + 9186, + 9187, + 9188, + 9189, + 9190, + 9191, + 9192, + 9193, + 9194, + 9195, + 9196, + 9197, + 9198, + 9199, + 9200, + 9201, + 9202, + 9203, + 9204, + 9205, + 9206, + 9207, + 9208, + 9209, + 9210, + 9211, + 9212, + 9213, + 9214, + 9215, + 9216, + 9217, + 9218, + 9219, + 9220, + 9221, + 9222, + 9223, + 9224, + 9225, + 9226, + 9227, + 9228, + 9229, + 9230, + 9231, + 9232, + 9233, + 
9234, + 9235, + 9236, + 9237, + 9238, + 9239, + 9240, + 9241, + 9242, + 9243, + 9244, + 9245, + 9246, + 9247, + 9248, + 9249, + 9250, + 9251, + 9252, + 9253, + 9254, + 9255, + 9256, + 9257, + 9258, + 9259, + 9260, + 9261, + 9262, + 9263, + 9264, + 9265, + 9266, + 9267, + 9268, + 9269, + 9270, + 9271, + 9272, + 9273, + 9274, + 9275, + 9276, + 9277, + 9278, + 9279, + 9280, + 9281, + 9282, + 9283, + 9284, + 9285, + 9286, + 9287, + 9288, + 9289, + 9290, + 9291, + 9292, + 9293, + 9294, + 9295, + 9296, + 9297, + 9298, + 9299, + 9300, + 9301, + 9302, + 9303, + 9304, + 9305, + 9306, + 9307, + 9308, + 9309, + 9310, + 9311, + 9312, + 9313, + 9314, + 9315, + 9316, + 9317, + 9318, + 9319, + 9320, + 9321, + 9322, + 9323, + 9324, + 9325, + 9326, + 9327, + 9328, + 9329, + 9330, + 9331, + 9332, + 9333, + 9334, + 9335, + 9336, + 9337, + 9338, + 9339, + 9340, + 9341, + 9342, + 9343, + 9344, + 9345, + 9346, + 9347, + 9348, + 9349, + 9350, + 9351, + 9352, + 9353, + 9354, + 9355, + 9356, + 9357, + 9358, + 9359, + 9360, + 9361, + 9362, + 9363, + 9364, + 9365, + 9366, + 9367, + 9368, + 9369, + 9370, + 9371, + 9372, + 9373, + 9374, + 9375, + 9376, + 9377, + 9378, + 9379, + 9380, + 9381, + 9382 + ], + "leftEye": [ + 9383, + 9384, + 9385, + 9386, + 9387, + 9388, + 9389, + 9390, + 9391, + 9392, + 9393, + 9394, + 9395, + 9396, + 9397, + 9398, + 9399, + 9400, + 9401, + 9402, + 9403, + 9404, + 9405, + 9406, + 9407, + 9408, + 9409, + 9410, + 9411, + 9412, + 9413, + 9414, + 9415, + 9416, + 9417, + 9418, + 9419, + 9420, + 9421, + 9422, + 9423, + 9424, + 9425, + 9426, + 9427, + 9428, + 9429, + 9430, + 9431, + 9432, + 9433, + 9434, + 9435, + 9436, + 9437, + 9438, + 9439, + 9440, + 9441, + 9442, + 9443, + 9444, + 9445, + 9446, + 9447, + 9448, + 9449, + 9450, + 9451, + 9452, + 9453, + 9454, + 9455, + 9456, + 9457, + 9458, + 9459, + 9460, + 9461, + 9462, + 9463, + 9464, + 9465, + 9466, + 9467, + 9468, + 9469, + 9470, + 9471, + 9472, + 9473, + 9474, + 9475, + 9476, + 9477, + 9478, + 9479, + 9480, + 9481, + 9482, + 9483, + 9484, + 9485, + 9486, + 9487, + 9488, + 9489, + 9490, + 9491, + 9492, + 9493, + 9494, + 9495, + 9496, + 9497, + 9498, + 9499, + 9500, + 9501, + 9502, + 9503, + 9504, + 9505, + 9506, + 9507, + 9508, + 9509, + 9510, + 9511, + 9512, + 9513, + 9514, + 9515, + 9516, + 9517, + 9518, + 9519, + 9520, + 9521, + 9522, + 9523, + 9524, + 9525, + 9526, + 9527, + 9528, + 9529, + 9530, + 9531, + 9532, + 9533, + 9534, + 9535, + 9536, + 9537, + 9538, + 9539, + 9540, + 9541, + 9542, + 9543, + 9544, + 9545, + 9546, + 9547, + 9548, + 9549, + 9550, + 9551, + 9552, + 9553, + 9554, + 9555, + 9556, + 9557, + 9558, + 9559, + 9560, + 9561, + 9562, + 9563, + 9564, + 9565, + 9566, + 9567, + 9568, + 9569, + 9570, + 9571, + 9572, + 9573, + 9574, + 9575, + 9576, + 9577, + 9578, + 9579, + 9580, + 9581, + 9582, + 9583, + 9584, + 9585, + 9586, + 9587, + 9588, + 9589, + 9590, + 9591, + 9592, + 9593, + 9594, + 9595, + 9596, + 9597, + 9598, + 9599, + 9600, + 9601, + 9602, + 9603, + 9604, + 9605, + 9606, + 9607, + 9608, + 9609, + 9610, + 9611, + 9612, + 9613, + 9614, + 9615, + 9616, + 9617, + 9618, + 9619, + 9620, + 9621, + 9622, + 9623, + 9624, + 9625, + 9626, + 9627, + 9628, + 9629, + 9630, + 9631, + 9632, + 9633, + 9634, + 9635, + 9636, + 9637, + 9638, + 9639, + 9640, + 9641, + 9642, + 9643, + 9644, + 9645, + 9646, + 9647, + 9648, + 9649, + 9650, + 9651, + 9652, + 9653, + 9654, + 9655, + 9656, + 9657, + 9658, + 9659, + 9660, + 9661, + 9662, + 9663, + 9664, + 9665, + 9666, + 9667, + 9668, + 9669, + 9670, + 9671, + 9672, + 9673, + 9674, + 9675, + 
9676, + 9677, + 9678, + 9679, + 9680, + 9681, + 9682, + 9683, + 9684, + 9685, + 9686, + 9687, + 9688, + 9689, + 9690, + 9691, + 9692, + 9693, + 9694, + 9695, + 9696, + 9697, + 9698, + 9699, + 9700, + 9701, + 9702, + 9703, + 9704, + 9705, + 9706, + 9707, + 9708, + 9709, + 9710, + 9711, + 9712, + 9713, + 9714, + 9715, + 9716, + 9717, + 9718, + 9719, + 9720, + 9721, + 9722, + 9723, + 9724, + 9725, + 9726, + 9727, + 9728, + 9729, + 9730, + 9731, + 9732, + 9733, + 9734, + 9735, + 9736, + 9737, + 9738, + 9739, + 9740, + 9741, + 9742, + 9743, + 9744, + 9745, + 9746, + 9747, + 9748, + 9749, + 9750, + 9751, + 9752, + 9753, + 9754, + 9755, + 9756, + 9757, + 9758, + 9759, + 9760, + 9761, + 9762, + 9763, + 9764, + 9765, + 9766, + 9767, + 9768, + 9769, + 9770, + 9771, + 9772, + 9773, + 9774, + 9775, + 9776, + 9777, + 9778, + 9779, + 9780, + 9781, + 9782, + 9783, + 9784, + 9785, + 9786, + 9787, + 9788, + 9789, + 9790, + 9791, + 9792, + 9793, + 9794, + 9795, + 9796, + 9797, + 9798, + 9799, + 9800, + 9801, + 9802, + 9803, + 9804, + 9805, + 9806, + 9807, + 9808, + 9809, + 9810, + 9811, + 9812, + 9813, + 9814, + 9815, + 9816, + 9817, + 9818, + 9819, + 9820, + 9821, + 9822, + 9823, + 9824, + 9825, + 9826, + 9827, + 9828, + 9829, + 9830, + 9831, + 9832, + 9833, + 9834, + 9835, + 9836, + 9837, + 9838, + 9839, + 9840, + 9841, + 9842, + 9843, + 9844, + 9845, + 9846, + 9847, + 9848, + 9849, + 9850, + 9851, + 9852, + 9853, + 9854, + 9855, + 9856, + 9857, + 9858, + 9859, + 9860, + 9861, + 9862, + 9863, + 9864, + 9865, + 9866, + 9867, + 9868, + 9869, + 9870, + 9871, + 9872, + 9873, + 9874, + 9875, + 9876, + 9877, + 9878, + 9879, + 9880, + 9881, + 9882, + 9883, + 9884, + 9885, + 9886, + 9887, + 9888, + 9889, + 9890, + 9891, + 9892, + 9893, + 9894, + 9895, + 9896, + 9897, + 9898, + 9899, + 9900, + 9901, + 9902, + 9903, + 9904, + 9905, + 9906, + 9907, + 9908, + 9909, + 9910, + 9911, + 9912, + 9913, + 9914, + 9915, + 9916, + 9917, + 9918, + 9919, + 9920, + 9921, + 9922, + 9923, + 9924, + 9925, + 9926, + 9927, + 9928 + ], + "rightEye": [ + 9929, + 9930, + 9931, + 9932, + 9933, + 9934, + 9935, + 9936, + 9937, + 9938, + 9939, + 9940, + 9941, + 9942, + 9943, + 9944, + 9945, + 9946, + 9947, + 9948, + 9949, + 9950, + 9951, + 9952, + 9953, + 9954, + 9955, + 9956, + 9957, + 9958, + 9959, + 9960, + 9961, + 9962, + 9963, + 9964, + 9965, + 9966, + 9967, + 9968, + 9969, + 9970, + 9971, + 9972, + 9973, + 9974, + 9975, + 9976, + 9977, + 9978, + 9979, + 9980, + 9981, + 9982, + 9983, + 9984, + 9985, + 9986, + 9987, + 9988, + 9989, + 9990, + 9991, + 9992, + 9993, + 9994, + 9995, + 9996, + 9997, + 9998, + 9999, + 10000, + 10001, + 10002, + 10003, + 10004, + 10005, + 10006, + 10007, + 10008, + 10009, + 10010, + 10011, + 10012, + 10013, + 10014, + 10015, + 10016, + 10017, + 10018, + 10019, + 10020, + 10021, + 10022, + 10023, + 10024, + 10025, + 10026, + 10027, + 10028, + 10029, + 10030, + 10031, + 10032, + 10033, + 10034, + 10035, + 10036, + 10037, + 10038, + 10039, + 10040, + 10041, + 10042, + 10043, + 10044, + 10045, + 10046, + 10047, + 10048, + 10049, + 10050, + 10051, + 10052, + 10053, + 10054, + 10055, + 10056, + 10057, + 10058, + 10059, + 10060, + 10061, + 10062, + 10063, + 10064, + 10065, + 10066, + 10067, + 10068, + 10069, + 10070, + 10071, + 10072, + 10073, + 10074, + 10075, + 10076, + 10077, + 10078, + 10079, + 10080, + 10081, + 10082, + 10083, + 10084, + 10085, + 10086, + 10087, + 10088, + 10089, + 10090, + 10091, + 10092, + 10093, + 10094, + 10095, + 10096, + 10097, + 10098, + 10099, + 10100, + 10101, + 10102, + 10103, + 10104, 
+ 10105, + 10106, + 10107, + 10108, + 10109, + 10110, + 10111, + 10112, + 10113, + 10114, + 10115, + 10116, + 10117, + 10118, + 10119, + 10120, + 10121, + 10122, + 10123, + 10124, + 10125, + 10126, + 10127, + 10128, + 10129, + 10130, + 10131, + 10132, + 10133, + 10134, + 10135, + 10136, + 10137, + 10138, + 10139, + 10140, + 10141, + 10142, + 10143, + 10144, + 10145, + 10146, + 10147, + 10148, + 10149, + 10150, + 10151, + 10152, + 10153, + 10154, + 10155, + 10156, + 10157, + 10158, + 10159, + 10160, + 10161, + 10162, + 10163, + 10164, + 10165, + 10166, + 10167, + 10168, + 10169, + 10170, + 10171, + 10172, + 10173, + 10174, + 10175, + 10176, + 10177, + 10178, + 10179, + 10180, + 10181, + 10182, + 10183, + 10184, + 10185, + 10186, + 10187, + 10188, + 10189, + 10190, + 10191, + 10192, + 10193, + 10194, + 10195, + 10196, + 10197, + 10198, + 10199, + 10200, + 10201, + 10202, + 10203, + 10204, + 10205, + 10206, + 10207, + 10208, + 10209, + 10210, + 10211, + 10212, + 10213, + 10214, + 10215, + 10216, + 10217, + 10218, + 10219, + 10220, + 10221, + 10222, + 10223, + 10224, + 10225, + 10226, + 10227, + 10228, + 10229, + 10230, + 10231, + 10232, + 10233, + 10234, + 10235, + 10236, + 10237, + 10238, + 10239, + 10240, + 10241, + 10242, + 10243, + 10244, + 10245, + 10246, + 10247, + 10248, + 10249, + 10250, + 10251, + 10252, + 10253, + 10254, + 10255, + 10256, + 10257, + 10258, + 10259, + 10260, + 10261, + 10262, + 10263, + 10264, + 10265, + 10266, + 10267, + 10268, + 10269, + 10270, + 10271, + 10272, + 10273, + 10274, + 10275, + 10276, + 10277, + 10278, + 10279, + 10280, + 10281, + 10282, + 10283, + 10284, + 10285, + 10286, + 10287, + 10288, + 10289, + 10290, + 10291, + 10292, + 10293, + 10294, + 10295, + 10296, + 10297, + 10298, + 10299, + 10300, + 10301, + 10302, + 10303, + 10304, + 10305, + 10306, + 10307, + 10308, + 10309, + 10310, + 10311, + 10312, + 10313, + 10314, + 10315, + 10316, + 10317, + 10318, + 10319, + 10320, + 10321, + 10322, + 10323, + 10324, + 10325, + 10326, + 10327, + 10328, + 10329, + 10330, + 10331, + 10332, + 10333, + 10334, + 10335, + 10336, + 10337, + 10338, + 10339, + 10340, + 10341, + 10342, + 10343, + 10344, + 10345, + 10346, + 10347, + 10348, + 10349, + 10350, + 10351, + 10352, + 10353, + 10354, + 10355, + 10356, + 10357, + 10358, + 10359, + 10360, + 10361, + 10362, + 10363, + 10364, + 10365, + 10366, + 10367, + 10368, + 10369, + 10370, + 10371, + 10372, + 10373, + 10374, + 10375, + 10376, + 10377, + 10378, + 10379, + 10380, + 10381, + 10382, + 10383, + 10384, + 10385, + 10386, + 10387, + 10388, + 10389, + 10390, + 10391, + 10392, + 10393, + 10394, + 10395, + 10396, + 10397, + 10398, + 10399, + 10400, + 10401, + 10402, + 10403, + 10404, + 10405, + 10406, + 10407, + 10408, + 10409, + 10410, + 10411, + 10412, + 10413, + 10414, + 10415, + 10416, + 10417, + 10418, + 10419, + 10420, + 10421, + 10422, + 10423, + 10424, + 10425, + 10426, + 10427, + 10428, + 10429, + 10430, + 10431, + 10432, + 10433, + 10434, + 10435, + 10436, + 10437, + 10438, + 10439, + 10440, + 10441, + 10442, + 10443, + 10444, + 10445, + 10446, + 10447, + 10448, + 10449, + 10450, + 10451, + 10452, + 10453, + 10454, + 10455, + 10456, + 10457, + 10458, + 10459, + 10460, + 10461, + 10462, + 10463, + 10464, + 10465, + 10466, + 10467, + 10468, + 10469, + 10470, + 10471, + 10472, + 10473, + 10474 + ], + "leftLeg": [ + 3625, + 3626, + 3629, + 3630, + 3636, + 3635, + 3637, + 3639, + 3642, + 3644, + 3643, + 3650, + 3649, + 3676, + 3675, + 3677, + 3678, + 3680, + 3679, + 3681, + 3683, + 3682, + 3684, + 3685, + 3686, + 
3689, + 3688, + 3687, + 3690, + 3693, + 3692, + 3691, + 3695, + 3694, + 3696, + 3699, + 3698, + 3697, + 3700, + 3701, + 3703, + 3702, + 3704, + 3707, + 3706, + 3705, + 3709, + 3708, + 3711, + 3710, + 3712, + 3715, + 3714, + 3713, + 3716, + 3719, + 3718, + 3717, + 3723, + 3722, + 3721, + 3720, + 3724, + 3727, + 3726, + 3725, + 3731, + 3730, + 3729, + 3728, + 3733, + 3732, + 3737, + 3740, + 3739, + 3738, + 3741, + 3744, + 3743, + 3742, + 3745, + 3748, + 3747, + 3746, + 3749, + 3751, + 3750, + 3754, + 3753, + 3752, + 3756, + 3755, + 3760, + 3759, + 3758, + 3757, + 3761, + 3764, + 3763, + 3762, + 3765, + 3767, + 3766, + 3768, + 3769, + 3781, + 3782, + 3783, + 3784, + 3786, + 3785, + 3787, + 3788, + 3790, + 3789, + 3791, + 3809, + 3811, + 3810, + 3812, + 3814, + 3813, + 3817, + 3816, + 3815, + 3999, + 4000, + 4001, + 4003, + 4004, + 4006, + 4005, + 4098, + 4099, + 4101, + 4100, + 4103, + 4102, + 4104, + 4105, + 4106, + 4108, + 4107, + 4154, + 4155, + 4156, + 4157, + 4159, + 4158, + 4160, + 4162, + 4161, + 4164, + 4163, + 5728, + 5731, + 5730, + 5729, + 5732, + 5733, + 5734, + 5737, + 5736, + 5735, + 5738, + 5742, + 5741, + 5740, + 5739, + 5743, + 5744, + 5745, + 5746, + 5747, + 5748, + 5749, + 5751, + 5750, + 5752, + 5753, + 5754, + 5755, + 5756, + 5757, + 5758, + 5760, + 5759, + 5761, + 5762, + 5763, + 5764, + 5874, + 5873, + 5875, + 5877, + 5876, + 5878, + 5880, + 5879, + 5882, + 5881, + 5883, + 5885, + 5884, + 5887, + 5886, + 5888, + 5889, + 8892, + 8893, + 8894, + 8895, + 8896, + 8935, + 8937, + 8936, + 9020 + ], + "leftToeBase": [ + 5767, + 5766, + 5765, + 5768, + 5771, + 5770, + 5769, + 5772, + 5775, + 5774, + 5773, + 5776, + 5779, + 5778, + 5777, + 5780, + 5783, + 5782, + 5781, + 5784, + 5785, + 5788, + 5787, + 5786, + 5789, + 5792, + 5791, + 5790, + 5794, + 5793, + 5795, + 5796, + 5797, + 5800, + 5799, + 5798, + 5802, + 5801, + 5803, + 5806, + 5805, + 5804, + 5807, + 5808, + 5811, + 5810, + 5809, + 5812, + 5815, + 5814, + 5813, + 5816, + 5818, + 5817, + 5819, + 5820, + 5823, + 5822, + 5821, + 5824, + 5825, + 5826, + 5829, + 5828, + 5827, + 5830, + 5832, + 5831, + 5835, + 5834, + 5833, + 5836, + 5837, + 5838, + 5841, + 5840, + 5839, + 5842, + 5844, + 5843, + 5846, + 5845, + 5847, + 5848, + 5849, + 5850, + 5851, + 5852, + 5853, + 5855, + 5854, + 5856, + 5857, + 5858, + 5859, + 5860, + 5861, + 5862, + 5863, + 5865, + 5864, + 5866, + 5867, + 5868, + 5869, + 5870, + 5871, + 5872, + 5890, + 5893, + 5895, + 5897, + 5899, + 5901, + 5903, + 5904, + 5906, + 5908, + 5911, + 5912, + 5914, + 5916 + ], + "leftFoot": [ + 5882, + 5881, + 5883, + 5885, + 5884, + 5887, + 5886, + 5888, + 5889, + 5890, + 5893, + 5892, + 5891, + 5895, + 5894, + 5897, + 5896, + 5899, + 5898, + 5901, + 5900, + 5903, + 5902, + 5904, + 5905, + 5906, + 5907, + 5908, + 5909, + 5910, + 5911, + 5912, + 5913, + 5914, + 5915, + 5916, + 5917, + 5918, + 5919, + 5922, + 5923, + 5924, + 5925, + 5926, + 5927, + 5928, + 5930, + 5929, + 5933, + 8730, + 8729, + 8728, + 8839, + 8840, + 8841, + 8845, + 8844, + 8843, + 8842, + 8847, + 8846, + 8848, + 8849, + 8850, + 8851, + 8852, + 8853, + 8855, + 8854, + 8856, + 8857, + 8858, + 8859, + 8860, + 8861, + 8862, + 8863, + 8865, + 8864, + 8866, + 8867, + 8868, + 8870, + 8869, + 8872, + 8871, + 8874, + 8873, + 8875, + 8876, + 8877, + 8878, + 8879, + 8880, + 8881, + 8882, + 8883, + 8884, + 8886, + 8885, + 8887, + 8888, + 8889, + 8890, + 8891, + 8892, + 8893, + 8894, + 8895, + 8896, + 8897, + 8898, + 8899, + 8900, + 8901, + 8902, + 8903, + 8904, + 8905, + 8906, + 8907, + 8908, + 8909, + 8910, + 8911, + 
8912, + 8913, + 8914, + 8915, + 8916, + 8917, + 8918, + 8919, + 8920, + 8921, + 8922, + 8924, + 8923, + 8925, + 8929, + 8930, + 8934, + 8935 + ], + "spine1": [ + 3231, + 3230, + 3229, + 3228, + 3240, + 3243, + 3242, + 3241, + 3247, + 3246, + 3245, + 3244, + 3248, + 3251, + 3250, + 3249, + 3272, + 3273, + 3276, + 3277, + 3283, + 3282, + 3288, + 3291, + 3290, + 3289, + 3301, + 3300, + 3299, + 3298, + 3314, + 3317, + 3316, + 3315, + 3318, + 3320, + 3319, + 3321, + 3322, + 3352, + 3357, + 3356, + 3355, + 3369, + 3384, + 3383, + 3393, + 3394, + 3400, + 3399, + 3427, + 3426, + 3521, + 3524, + 3523, + 3522, + 3556, + 3555, + 3557, + 3559, + 3558, + 3571, + 3570, + 3573, + 3572, + 3824, + 3827, + 3826, + 3825, + 3829, + 3828, + 3833, + 3830, + 3838, + 3837, + 3836, + 3844, + 3873, + 3892, + 3893, + 3897, + 3896, + 3908, + 3909, + 3910, + 3855, + 3856, + 3981, + 3982, + 3985, + 4052, + 4053, + 4054, + 4057, + 4056, + 4058, + 4070, + 4069, + 4392, + 4393, + 4394, + 5417, + 5418, + 5419, + 5420, + 5422, + 5421, + 5423, + 5424, + 5425, + 5426, + 5428, + 5427, + 5429, + 5449, + 5448, + 5459, + 5483, + 5485, + 5486, + 5489, + 5632, + 5634, + 5635, + 5638, + 5639, + 5642, + 5645, + 5644, + 5646, + 5647, + 5648, + 5531, + 5532, + 5534, + 9026, + 5944, + 5950, + 5994, + 5991, + 5992, + 5993, + 6003, + 6004, + 6005, + 6006, + 6010, + 6007, + 6008, + 6009, + 6011, + 6012, + 6013, + 6014, + 6035, + 6036, + 6039, + 6040, + 6045, + 6046, + 6051, + 6052, + 6053, + 6054, + 6064, + 6061, + 6062, + 6063, + 6077, + 6078, + 6079, + 6080, + 6081, + 6082, + 6083, + 6084, + 6085, + 6115, + 6116, + 6117, + 6118, + 6130, + 6144, + 6145, + 6154, + 6155, + 6160, + 6161, + 6187, + 6188, + 6282, + 6283, + 6284, + 6285, + 6316, + 6317, + 6318, + 6319, + 6320, + 6332, + 6331, + 6333, + 6334, + 6581, + 6582, + 6583, + 6584, + 6586, + 6585, + 6587, + 6588, + 6591, + 6592, + 6593, + 6599, + 6624, + 6640, + 6641, + 6645, + 6644, + 6656, + 6657, + 6658, + 6729, + 6730, + 6733, + 6798, + 6799, + 6800, + 6802, + 6803, + 6804, + 6813, + 6814, + 7128, + 7129, + 7130, + 8151, + 8152, + 8153, + 8154, + 8155, + 8156, + 8157, + 8158, + 8159, + 8160, + 8161, + 8162, + 8163, + 8182, + 8183, + 8193, + 8217, + 8218, + 8326, + 8328, + 8329, + 8332, + 8333, + 8336, + 8338, + 8339, + 8340, + 8341, + 8342, + 8726 + ], + "spine2": [ + 3210, + 3211, + 3217, + 3216, + 3215, + 3214, + 3218, + 3221, + 3220, + 3219, + 3222, + 3223, + 3224, + 3227, + 3226, + 3225, + 3232, + 3235, + 3234, + 3233, + 3236, + 3239, + 3238, + 3237, + 3252, + 3255, + 3254, + 3253, + 3271, + 3270, + 3269, + 3268, + 3275, + 3274, + 3278, + 3281, + 3280, + 3279, + 3296, + 3297, + 3305, + 3304, + 3303, + 3302, + 3312, + 3311, + 3310, + 3313, + 3323, + 3324, + 3325, + 3328, + 3327, + 3326, + 3330, + 3329, + 3332, + 3331, + 3333, + 3334, + 3343, + 3342, + 3345, + 3347, + 3346, + 3358, + 3361, + 3360, + 3359, + 3362, + 3365, + 3364, + 3363, + 3367, + 3368, + 3372, + 3371, + 3370, + 3373, + 3374, + 3376, + 3375, + 3377, + 3380, + 3379, + 3378, + 3381, + 3382, + 3387, + 3386, + 3385, + 3388, + 3391, + 3390, + 3389, + 3392, + 3396, + 3395, + 3435, + 3438, + 3437, + 3436, + 3443, + 3446, + 3445, + 3444, + 3450, + 3449, + 3452, + 3451, + 3453, + 3526, + 3525, + 3560, + 3561, + 3834, + 3835, + 3847, + 3846, + 3848, + 3853, + 3850, + 3849, + 3857, + 3854, + 3872, + 3874, + 3895, + 3894, + 3912, + 3911, + 3913, + 3924, + 3923, + 3922, + 3925, + 3927, + 3926, + 3931, + 3930, + 3929, + 3928, + 3932, + 3935, + 3934, + 3933, + 3937, + 3936, + 3938, + 3939, + 3940, + 3943, + 3942, + 3941, + 3831, 
+ 3832, + 3944, + 3945, + 3946, + 3980, + 3979, + 3983, + 3984, + 4032, + 4049, + 4051, + 4050, + 4055, + 4059, + 4068, + 4071, + 4136, + 4137, + 4168, + 4169, + 4175, + 4174, + 4391, + 4279, + 4280, + 4396, + 4395, + 4398, + 4397, + 4399, + 4426, + 4429, + 4428, + 4427, + 4434, + 4435, + 4436, + 4438, + 4437, + 4452, + 4454, + 4453, + 4455, + 4457, + 4456, + 4486, + 4497, + 4498, + 5395, + 5396, + 5430, + 5433, + 5432, + 5431, + 5434, + 5435, + 5436, + 5437, + 5438, + 5439, + 5440, + 5441, + 5442, + 5444, + 5443, + 5445, + 5446, + 5447, + 5450, + 5454, + 5453, + 5349, + 5350, + 5457, + 5458, + 5460, + 5461, + 5462, + 5480, + 5481, + 5482, + 5522, + 5521, + 5523, + 5524, + 5530, + 5526, + 5525, + 5536, + 5547, + 5548, + 5549, + 5550, + 5551, + 5552, + 5553, + 5554, + 5556, + 5555, + 5559, + 5558, + 5561, + 5560, + 5562, + 5563, + 5565, + 5564, + 5567, + 5566, + 5569, + 5568, + 5570, + 5571, + 5599, + 5598, + 5611, + 5612, + 5484, + 5487, + 5633, + 5499, + 5500, + 5501, + 5637, + 5636, + 5641, + 5640, + 5643, + 5519, + 5651, + 5650, + 5652, + 5654, + 5653, + 5655, + 5656, + 5528, + 5529, + 5657, + 5533, + 5618, + 5619, + 5621, + 5920, + 5921, + 5932, + 9027, + 5935, + 5936, + 5937, + 5938, + 5945, + 5947, + 5973, + 5974, + 5980, + 5977, + 5978, + 5979, + 5981, + 5982, + 5983, + 5984, + 5985, + 5986, + 5987, + 5988, + 5989, + 5990, + 5995, + 5996, + 5997, + 5998, + 5999, + 6000, + 6001, + 6002, + 6015, + 6016, + 6017, + 6018, + 6034, + 6031, + 6032, + 6033, + 6037, + 6038, + 6041, + 6042, + 6043, + 6044, + 6059, + 6060, + 6068, + 6065, + 6066, + 6067, + 6075, + 6076, + 6073, + 6074, + 6086, + 6087, + 6088, + 6089, + 6090, + 6091, + 6092, + 6093, + 6095, + 6094, + 6096, + 6097, + 6108, + 6105, + 6106, + 6110, + 6109, + 6119, + 6120, + 6121, + 6122, + 6123, + 6124, + 6125, + 6126, + 6128, + 6129, + 6133, + 6134, + 6131, + 6132, + 6135, + 6136, + 6137, + 6138, + 6139, + 6140, + 6141, + 6142, + 6143, + 6148, + 6149, + 6146, + 6147, + 6152, + 6153, + 6150, + 6151, + 6156, + 6157, + 6196, + 6197, + 6198, + 6199, + 6204, + 6205, + 6206, + 6207, + 6211, + 6210, + 6212, + 6213, + 6214, + 6286, + 6287, + 6321, + 6322, + 6589, + 6590, + 6601, + 6602, + 6603, + 6604, + 6605, + 6606, + 6608, + 6607, + 6623, + 6625, + 6642, + 6643, + 6661, + 6659, + 6660, + 6672, + 6673, + 6670, + 6671, + 6674, + 6675, + 6679, + 6676, + 6677, + 6678, + 6680, + 6681, + 6682, + 6683, + 6685, + 6684, + 6686, + 6687, + 6688, + 6689, + 6690, + 6691, + 6692, + 6693, + 6694, + 6728, + 6727, + 6731, + 6732, + 6779, + 6795, + 6796, + 6797, + 6801, + 6805, + 6812, + 6815, + 6881, + 6880, + 6912, + 6913, + 6919, + 6918, + 7127, + 7131, + 7132, + 7133, + 7134, + 7135, + 7162, + 7165, + 7163, + 7164, + 7170, + 7171, + 7172, + 7173, + 7174, + 7188, + 7190, + 7189, + 7191, + 7192, + 7193, + 7222, + 7233, + 7234, + 8129, + 8130, + 8164, + 8165, + 8166, + 8167, + 8168, + 8169, + 8170, + 8171, + 8172, + 8173, + 8174, + 8175, + 8176, + 8177, + 8178, + 8179, + 8180, + 8181, + 8184, + 8187, + 8188, + 8191, + 8192, + 8194, + 8195, + 8196, + 8214, + 8215, + 8216, + 8242, + 8241, + 8243, + 8244, + 8245, + 8246, + 8247, + 8249, + 8260, + 8261, + 8262, + 8263, + 8264, + 8265, + 8266, + 8267, + 8268, + 8269, + 8270, + 8271, + 8273, + 8272, + 8274, + 8275, + 8276, + 8277, + 8279, + 8278, + 8280, + 8281, + 8282, + 8283, + 8308, + 8307, + 8316, + 8317, + 8327, + 8330, + 8331, + 8334, + 8335, + 8337, + 8344, + 8345, + 8346, + 8347, + 8348, + 8349, + 8350, + 8351, + 8727 + ], + "leftShoulder": [ + 3219, + 3234, + 3233, + 3236, + 3237, + 3267, + 3266, + 
3265, + 3264, + 3303, + 3336, + 3339, + 3338, + 3337, + 3341, + 3340, + 3344, + 3343, + 3345, + 3346, + 3362, + 3363, + 3366, + 3367, + 3413, + 3415, + 3414, + 3875, + 3878, + 3877, + 3876, + 3880, + 3883, + 3882, + 3881, + 3930, + 3929, + 3935, + 3955, + 3954, + 3953, + 4032, + 4035, + 4034, + 4033, + 4143, + 4167, + 4174, + 4426, + 4428, + 4427, + 4430, + 4433, + 4432, + 4431, + 4436, + 4438, + 4439, + 4442, + 4441, + 4440, + 4443, + 4446, + 4445, + 4444, + 4447, + 4450, + 4449, + 4448, + 4451, + 4455, + 4459, + 4458, + 4462, + 4461, + 4460, + 4465, + 4464, + 4463, + 4466, + 4467, + 4471, + 4470, + 4469, + 4468, + 4475, + 4474, + 4473, + 4472, + 4476, + 4477, + 4479, + 4482, + 4481, + 4480, + 4491, + 4490, + 4498, + 4499, + 4502, + 4505, + 4504, + 4503, + 4509, + 4508, + 4511, + 4513, + 4512, + 4515, + 4514, + 4516, + 4517, + 5456, + 5455, + 5457, + 5462, + 5463, + 5464, + 5465, + 5466, + 5470, + 5469, + 5468, + 5467, + 5479, + 5535, + 5538, + 5537, + 5536, + 5540, + 5539, + 5541, + 5542, + 5543, + 5544, + 5545, + 5546, + 5563, + 5564, + 5566, + 5605, + 5602, + 5606, + 5607, + 5608, + 5609, + 5610, + 5624, + 5625, + 5626, + 5627 + ], + "rightShoulder": [ + 5982, + 5996, + 5997, + 5999, + 6000, + 6030, + 6027, + 6028, + 6029, + 6066, + 6099, + 6100, + 6101, + 6102, + 6103, + 6104, + 6107, + 6108, + 6106, + 6109, + 6123, + 6124, + 6127, + 6128, + 6174, + 6175, + 6176, + 6626, + 6627, + 6628, + 6629, + 6630, + 6631, + 6632, + 6633, + 6677, + 6678, + 6683, + 6703, + 6701, + 6702, + 6779, + 6780, + 6781, + 6782, + 6887, + 6911, + 6918, + 7162, + 7163, + 7164, + 7166, + 7167, + 7168, + 7169, + 7172, + 7174, + 7175, + 7176, + 7177, + 7178, + 7179, + 7180, + 7181, + 7182, + 7183, + 7184, + 7185, + 7186, + 7187, + 7191, + 7195, + 7194, + 7196, + 7197, + 7198, + 7201, + 7202, + 7199, + 7200, + 7203, + 7207, + 7204, + 7205, + 7206, + 7211, + 7208, + 7209, + 7210, + 7212, + 7213, + 7215, + 7216, + 7217, + 7218, + 7226, + 7227, + 7234, + 7235, + 7238, + 7239, + 7240, + 7241, + 7245, + 7244, + 7247, + 7248, + 7249, + 7250, + 7251, + 7252, + 7253, + 8189, + 8190, + 8191, + 8196, + 8197, + 8198, + 8199, + 8200, + 8204, + 8201, + 8202, + 8203, + 8213, + 8248, + 8249, + 8250, + 8251, + 8253, + 8252, + 8254, + 8255, + 8256, + 8257, + 8258, + 8259, + 8275, + 8276, + 8278, + 8310, + 8309, + 8311, + 8312, + 8313, + 8314, + 8315, + 8318, + 8319, + 8320, + 8321 + ], + "rightFoot": [ + 8575, + 8576, + 8577, + 8578, + 8579, + 8580, + 8581, + 8582, + 8583, + 8584, + 8585, + 8586, + 8587, + 8588, + 8589, + 8590, + 8591, + 8592, + 8593, + 8594, + 8595, + 8596, + 8597, + 8598, + 8599, + 8600, + 8601, + 8602, + 8603, + 8604, + 8605, + 8606, + 8607, + 8608, + 8609, + 8610, + 8611, + 8612, + 8613, + 8614, + 8615, + 8616, + 8617, + 8618, + 8619, + 8620, + 8621, + 8622, + 8623, + 8624, + 8625, + 8626, + 8627, + 8628, + 8629, + 8633, + 8630, + 8631, + 8632, + 8634, + 8635, + 8636, + 8637, + 8638, + 8639, + 8640, + 8641, + 8642, + 8643, + 8644, + 8645, + 8646, + 8647, + 8648, + 8649, + 8650, + 8651, + 8652, + 8653, + 8654, + 8655, + 8656, + 8657, + 8658, + 8659, + 8660, + 8661, + 8662, + 8663, + 8664, + 8665, + 8666, + 8667, + 8668, + 8669, + 8670, + 8671, + 8672, + 8673, + 8674, + 8675, + 8676, + 8677, + 8678, + 8679, + 8680, + 8681, + 8682, + 8683, + 8684, + 8685, + 8686, + 8687, + 8688, + 8689, + 8690, + 8691, + 8692, + 8693, + 8694, + 8695, + 8696, + 8697, + 8698, + 8699, + 8700, + 8701, + 8702, + 8703, + 8704, + 8705, + 8706, + 8707, + 8708, + 8709, + 8710, + 8711, + 8712, + 8713, + 8714, + 8715, + 8716, + 8717 + ], + 
"rightArm": [ + 6019, + 6020, + 6021, + 6022, + 6030, + 6029, + 6075, + 6074, + 6110, + 6111, + 6112, + 6109, + 6162, + 6163, + 6164, + 6165, + 6168, + 6169, + 6166, + 6167, + 6170, + 6171, + 6172, + 6173, + 6180, + 6177, + 6178, + 6179, + 6182, + 6181, + 6183, + 6184, + 6185, + 6186, + 6619, + 6620, + 6621, + 6622, + 6646, + 6647, + 6648, + 6649, + 6660, + 6668, + 6669, + 6696, + 6695, + 6699, + 6700, + 6724, + 6721, + 6722, + 6723, + 6737, + 6738, + 6735, + 6736, + 6754, + 6755, + 6756, + 6757, + 6758, + 6759, + 6760, + 6761, + 6762, + 6763, + 6764, + 6765, + 6766, + 6767, + 6768, + 6769, + 6770, + 6771, + 6772, + 6773, + 6774, + 6775, + 6776, + 6777, + 6778, + 6781, + 6782, + 6783, + 6784, + 6785, + 6786, + 6787, + 6788, + 6789, + 6790, + 6791, + 6792, + 6793, + 6794, + 6806, + 6807, + 6811, + 6808, + 6809, + 6810, + 6816, + 6817, + 6818, + 6819, + 6823, + 6820, + 6821, + 6822, + 6879, + 6882, + 6883, + 6884, + 6885, + 6886, + 6887, + 6914, + 6915, + 6916, + 6917, + 6918, + 6993, + 6994, + 6995, + 6996, + 7005, + 7006, + 7007, + 7008, + 7012, + 7009, + 7010, + 7011, + 7013, + 7014, + 7015, + 7016, + 7019, + 7020, + 7021, + 7022, + 7023, + 7024, + 7025, + 7026, + 7027, + 7028, + 7029, + 7030, + 7031, + 7032, + 7035, + 7036, + 7039, + 7040, + 7041, + 7042, + 7043, + 7044, + 7045, + 7046, + 7047, + 7048, + 7049, + 7050, + 7051, + 7052, + 7054, + 7053, + 7056, + 7055, + 7057, + 7058, + 7070, + 7071, + 7072, + 7077, + 7078, + 7079, + 7080, + 7082, + 7081, + 7086, + 7083, + 7084, + 7085, + 7087, + 7089, + 7088, + 7090, + 7091, + 7092, + 7093, + 7094, + 7099, + 7105, + 7106, + 7107, + 7108, + 7109, + 7111, + 7113, + 7114, + 7119, + 7120, + 7121, + 7122, + 7123, + 7125, + 7134, + 7185, + 7186, + 7196, + 7201, + 7200, + 7207, + 7206, + 7211, + 7210, + 7212, + 7214, + 7219, + 7220, + 7221, + 7223, + 7224, + 7225, + 7228, + 7229, + 7232, + 7230, + 7231, + 7236, + 7237, + 7242, + 7243, + 7246, + 7254, + 7256, + 7257, + 7255, + 7258, + 7259, + 8131, + 8132, + 8133, + 8134, + 8205, + 8207, + 8206, + 8208, + 8209, + 8210, + 8211, + 8212, + 8213, + 8255, + 8256, + 8286, + 8287, + 8284, + 8285, + 8288, + 8290, + 8291, + 8289, + 8292, + 8293, + 8294, + 8297, + 8295, + 8296, + 8298, + 8299, + 8300, + 8301, + 8302, + 8303, + 8304, + 8305, + 8306, + 8312, + 8322 + ], + "leftHandIndex1": [ + 4643, + 4642, + 4641, + 4644, + 4653, + 4652, + 4651, + 4654, + 4669, + 4682, + 4681, + 4739, + 4738, + 4737, + 4740, + 4743, + 4742, + 4741, + 4744, + 4745, + 4759, + 4760, + 4768, + 4767, + 4766, + 4772, + 4771, + 4770, + 4773, + 4776, + 4775, + 4774, + 4777, + 4778, + 4779, + 4782, + 4781, + 4780, + 4783, + 4792, + 4791, + 4793, + 4795, + 4800, + 4801, + 4802, + 4805, + 4819, + 4818, + 4831, + 4830, + 4829, + 4832, + 4833, + 4834, + 4847, + 4846, + 4859, + 4861, + 4860, + 4872, + 4874, + 4875, + 4876, + 4877, + 4883, + 4884, + 4887, + 4886, + 4888, + 4891, + 4890, + 4895, + 4894, + 4896, + 4897, + 4906, + 4905, + 4907, + 4908, + 4910, + 4909, + 4911, + 4912, + 4913, + 4914, + 4915, + 4916, + 4917, + 4918, + 4921, + 4920, + 4919, + 4922, + 4923, + 4924, + 4926, + 4925, + 4928, + 4927, + 4929, + 4930, + 4931, + 4934, + 4933, + 4932, + 4935, + 4936, + 4937, + 4938, + 4939, + 4941, + 4940, + 4942, + 4944, + 4943, + 4946, + 4945, + 4947, + 4948, + 4949, + 4950, + 4951, + 4952, + 4953, + 4954, + 4955, + 4956, + 4957, + 4958, + 4959, + 4960, + 4961, + 4962, + 4964, + 4963, + 4965, + 4966, + 4967, + 4970, + 4969, + 4968, + 4973, + 4972, + 4971, + 4974, + 4977, + 4976, + 4975, + 4978, + 4980, + 4979, + 4983, + 4982, + 4981, + 
4984, + 4986, + 4985, + 4987, + 4988, + 4991, + 4990, + 4989, + 4992, + 4994, + 4993, + 4995, + 4998, + 4997, + 4996, + 4999, + 5000, + 5001, + 5002, + 5004, + 5003, + 5007, + 5006, + 5005, + 5008, + 5009, + 5010, + 5012, + 5011, + 5013, + 5014, + 5016, + 5015, + 5018, + 5017, + 5019, + 5020, + 5022, + 5021, + 5023, + 5024, + 5025, + 5026, + 5027, + 5028, + 5029, + 5030, + 5033, + 5032, + 5031, + 5034, + 5035, + 5036, + 5038, + 5037, + 5040, + 5039, + 5041, + 5042, + 5043, + 5044, + 5045, + 5046, + 5047, + 5048, + 5049, + 5051, + 5050, + 5052, + 5054, + 5053, + 5056, + 5055, + 5057, + 5059, + 5058, + 5060, + 5061, + 5062, + 5063, + 5064, + 5065, + 5066, + 5067, + 5068, + 5069, + 5070, + 5071, + 5072, + 5073, + 5074, + 5076, + 5075, + 5077, + 5078, + 5079, + 5082, + 5081, + 5080, + 5085, + 5084, + 5083, + 5086, + 5089, + 5088, + 5087, + 5090, + 5092, + 5091, + 5095, + 5094, + 5093, + 5096, + 5097, + 5098, + 5101, + 5100, + 5099, + 5102, + 5103, + 5104, + 5105, + 5108, + 5107, + 5106, + 5109, + 5110, + 5111, + 5112, + 5114, + 5113, + 5117, + 5116, + 5115, + 5118, + 5119, + 5120, + 5122, + 5121, + 5123, + 5124, + 5125, + 5127, + 5126, + 5129, + 5128, + 5130, + 5131, + 5133, + 5132, + 5134, + 5135, + 5136, + 5137, + 5138, + 5139, + 5140, + 5141, + 5144, + 5143, + 5142, + 5145, + 5146, + 5147, + 5149, + 5148, + 5151, + 5150, + 5152, + 5153, + 5154, + 5155, + 5156, + 5157, + 5158, + 5159, + 5160, + 5162, + 5161, + 5163, + 5165, + 5164, + 5167, + 5166, + 5168, + 5170, + 5169, + 5171, + 5172, + 5173, + 5174, + 5175, + 5176, + 5177, + 5178, + 5179, + 5180, + 5181, + 5182, + 5183, + 5184, + 5185, + 5187, + 5186, + 5188, + 5189, + 5190, + 5193, + 5192, + 5191, + 5194, + 5197, + 5196, + 5195, + 5198, + 5201, + 5200, + 5199, + 5202, + 5204, + 5203, + 5207, + 5206, + 5205, + 5208, + 5210, + 5209, + 5213, + 5214, + 5217, + 5216, + 5215, + 5218, + 5220, + 5219, + 5223, + 5226, + 5225, + 5224, + 5227, + 5228, + 5229, + 5230, + 5232, + 5231, + 5235, + 5234, + 5233, + 5236, + 5237, + 5238, + 5240, + 5239, + 5241, + 5242, + 5244, + 5243, + 5246, + 5245, + 5247, + 5248, + 5250, + 5249, + 5251, + 5252, + 5253, + 5254, + 5255, + 5256, + 5257, + 5258, + 5261, + 5260, + 5259, + 5262, + 5263, + 5264, + 5266, + 5265, + 5268, + 5267, + 5269, + 5270, + 5271, + 5272, + 5273, + 5274, + 5275, + 5276, + 5277, + 5279, + 5278, + 5280, + 5282, + 5281, + 5284, + 5283, + 5285, + 5287, + 5286, + 5288, + 5289, + 5290, + 5291, + 5292, + 5293, + 5294, + 5295, + 5296, + 5297, + 5298, + 5299, + 5300, + 5301, + 5302, + 5304, + 5303, + 5305, + 5306, + 5307, + 5310, + 5309, + 5308 + ], + "rightLeg": [ + 6386, + 6387, + 6390, + 6391, + 6396, + 6397, + 6398, + 6400, + 6403, + 6404, + 6405, + 6410, + 6411, + 6437, + 6436, + 6438, + 6439, + 6440, + 6441, + 6442, + 6443, + 6444, + 6445, + 6446, + 6447, + 6448, + 6449, + 6450, + 6451, + 6452, + 6453, + 6454, + 6455, + 6456, + 6457, + 6460, + 6458, + 6459, + 6461, + 6462, + 6463, + 6464, + 6465, + 6466, + 6467, + 6468, + 6469, + 6470, + 6471, + 6472, + 6473, + 6474, + 6475, + 6476, + 6477, + 6478, + 6479, + 6480, + 6484, + 6481, + 6482, + 6483, + 6485, + 6486, + 6487, + 6488, + 6492, + 6489, + 6490, + 6491, + 6494, + 6493, + 6495, + 6496, + 6497, + 6498, + 6499, + 6500, + 6501, + 6502, + 6503, + 6504, + 6505, + 6506, + 6507, + 6508, + 6509, + 6510, + 6511, + 6512, + 6513, + 6514, + 6518, + 6515, + 6516, + 6517, + 6519, + 6520, + 6521, + 6522, + 6523, + 6524, + 6525, + 6526, + 6527, + 6539, + 6540, + 6541, + 6542, + 6543, + 6544, + 6545, + 6546, + 6547, + 6548, + 6549, + 6566, + 6567, + 6568, 
+ 6569, + 6570, + 6571, + 6574, + 6572, + 6573, + 6747, + 6748, + 6749, + 6750, + 6751, + 6752, + 6753, + 6842, + 6843, + 6844, + 6845, + 6846, + 6847, + 6848, + 6849, + 6850, + 6852, + 6851, + 6898, + 6899, + 6900, + 6901, + 6902, + 6903, + 6904, + 6905, + 6906, + 6908, + 6907, + 8422, + 8423, + 8424, + 8425, + 8426, + 8427, + 8428, + 8429, + 8430, + 8431, + 8432, + 8436, + 8433, + 8434, + 8435, + 8437, + 8438, + 8439, + 8440, + 8441, + 8442, + 8443, + 8444, + 8445, + 8446, + 8447, + 8448, + 8449, + 8450, + 8451, + 8452, + 8453, + 8454, + 8455, + 8456, + 8457, + 8458, + 8567, + 8568, + 8569, + 8570, + 8571, + 8572, + 8573, + 8574, + 8575, + 8576, + 8577, + 8578, + 8579, + 8580, + 8581, + 8582, + 8583, + 8680, + 8681, + 8682, + 8683, + 8684, + 8717, + 8718, + 8719, + 8720 + ], + "rightHandIndex1": [ + 7379, + 7380, + 7377, + 7378, + 7389, + 7390, + 7387, + 7388, + 7405, + 7418, + 7417, + 7475, + 7476, + 7473, + 7474, + 7479, + 7480, + 7477, + 7478, + 7481, + 7496, + 7495, + 7504, + 7502, + 7503, + 7508, + 7509, + 7506, + 7507, + 7512, + 7513, + 7510, + 7511, + 7514, + 7515, + 7518, + 7519, + 7516, + 7517, + 7528, + 7529, + 7527, + 7531, + 7536, + 7537, + 7541, + 7538, + 7555, + 7554, + 7567, + 7568, + 7565, + 7566, + 7569, + 7570, + 7582, + 7583, + 7595, + 7596, + 7597, + 7608, + 7610, + 7611, + 7613, + 7612, + 7619, + 7620, + 7622, + 7623, + 7624, + 7626, + 7627, + 7631, + 7630, + 7632, + 7633, + 7643, + 7641, + 7642, + 7644, + 7645, + 7646, + 7647, + 7648, + 7649, + 7650, + 7651, + 7652, + 7653, + 7654, + 7657, + 7658, + 7655, + 7656, + 7660, + 7659, + 7661, + 7662, + 7665, + 7663, + 7664, + 7666, + 7667, + 7670, + 7671, + 7668, + 7669, + 7672, + 7673, + 7674, + 7675, + 7677, + 7676, + 7678, + 7680, + 7679, + 7681, + 7682, + 7683, + 7684, + 7685, + 7686, + 7687, + 7688, + 7690, + 7689, + 7691, + 7692, + 7693, + 7694, + 7695, + 7696, + 7697, + 7698, + 7700, + 7699, + 7701, + 7702, + 7703, + 7706, + 7704, + 7705, + 7709, + 7710, + 7707, + 7708, + 7713, + 7714, + 7711, + 7712, + 7716, + 7715, + 7719, + 7720, + 7717, + 7718, + 7721, + 7722, + 7724, + 7723, + 7727, + 7728, + 7725, + 7726, + 7729, + 7730, + 7731, + 7734, + 7735, + 7732, + 7733, + 7736, + 7737, + 7738, + 7740, + 7739, + 7743, + 7744, + 7741, + 7742, + 7745, + 7746, + 7747, + 7748, + 7749, + 7750, + 7751, + 7752, + 7754, + 7753, + 7755, + 7756, + 7757, + 7758, + 7759, + 7760, + 7761, + 7762, + 7763, + 7764, + 7765, + 7766, + 7769, + 7770, + 7767, + 7768, + 7772, + 7771, + 7773, + 7774, + 7777, + 7775, + 7776, + 7778, + 7780, + 7779, + 7781, + 7782, + 7783, + 7784, + 7785, + 7787, + 7786, + 7788, + 7790, + 7789, + 7791, + 7792, + 7793, + 7794, + 7795, + 7796, + 7797, + 7798, + 7799, + 7800, + 7802, + 7801, + 7803, + 7804, + 7805, + 7806, + 7807, + 7808, + 7809, + 7810, + 7812, + 7811, + 7813, + 7814, + 7815, + 7818, + 7816, + 7817, + 7821, + 7822, + 7819, + 7820, + 7825, + 7826, + 7823, + 7824, + 7828, + 7827, + 7831, + 7832, + 7829, + 7830, + 7834, + 7833, + 7837, + 7838, + 7835, + 7836, + 7840, + 7839, + 7841, + 7844, + 7845, + 7842, + 7843, + 7846, + 7847, + 7848, + 7850, + 7849, + 7853, + 7854, + 7851, + 7852, + 7855, + 7856, + 7857, + 7858, + 7859, + 7860, + 7861, + 7862, + 7863, + 7865, + 7864, + 7866, + 7867, + 7868, + 7869, + 7870, + 7871, + 7872, + 7873, + 7874, + 7875, + 7876, + 7877, + 7880, + 7881, + 7878, + 7879, + 7883, + 7882, + 7884, + 7885, + 7888, + 7886, + 7887, + 7889, + 7891, + 7890, + 7892, + 7893, + 7894, + 7895, + 7896, + 7898, + 7897, + 7899, + 7901, + 7900, + 7902, + 7903, + 7904, + 7905, + 7906, + 7907, 
+ 7908, + 7909, + 7910, + 7911, + 7913, + 7912, + 7914, + 7915, + 7916, + 7917, + 7918, + 7919, + 7920, + 7921, + 7923, + 7922, + 7924, + 7925, + 7926, + 7929, + 7927, + 7928, + 7930, + 7933, + 7934, + 7931, + 7932, + 7937, + 7938, + 7935, + 7936, + 7940, + 7939, + 7943, + 7944, + 7941, + 7942, + 7945, + 7946, + 7950, + 7949, + 7953, + 7954, + 7951, + 7952, + 7955, + 7956, + 7959, + 7962, + 7963, + 7960, + 7961, + 7964, + 7965, + 7966, + 7968, + 7967, + 7971, + 7972, + 7969, + 7970, + 7973, + 7974, + 7975, + 7976, + 7977, + 7978, + 7979, + 7980, + 7982, + 7981, + 7983, + 7984, + 7985, + 7986, + 7987, + 7988, + 7989, + 7990, + 7991, + 7992, + 7993, + 7994, + 7997, + 7998, + 7995, + 7996, + 8000, + 7999, + 8001, + 8002, + 8005, + 8003, + 8004, + 8006, + 8008, + 8007, + 8009, + 8010, + 8011, + 8012, + 8013, + 8015, + 8014, + 8016, + 8018, + 8017, + 8019, + 8020, + 8021, + 8022, + 8023, + 8024, + 8025, + 8026, + 8027, + 8028, + 8030, + 8029, + 8031, + 8032, + 8033, + 8034, + 8035, + 8036, + 8037, + 8038, + 8040, + 8039, + 8041, + 8042, + 8043, + 8046, + 8044, + 8045 + ], + "leftForeArm": [ + 4176, + 4179, + 4178, + 4177, + 4180, + 4181, + 4182, + 4185, + 4184, + 4183, + 4186, + 4189, + 4188, + 4187, + 4190, + 4193, + 4192, + 4191, + 4194, + 4197, + 4196, + 4195, + 4198, + 4201, + 4200, + 4199, + 4202, + 4205, + 4204, + 4203, + 4207, + 4206, + 4208, + 4211, + 4210, + 4209, + 4212, + 4215, + 4214, + 4213, + 4216, + 4218, + 4217, + 4219, + 4222, + 4221, + 4220, + 4223, + 4226, + 4225, + 4224, + 4227, + 4228, + 4229, + 4232, + 4231, + 4230, + 4233, + 4236, + 4235, + 4234, + 4238, + 4237, + 4239, + 4242, + 4241, + 4240, + 4243, + 4246, + 4245, + 4244, + 4247, + 4248, + 4252, + 4251, + 4253, + 4256, + 4255, + 4254, + 4257, + 4260, + 4259, + 4258, + 4273, + 4274, + 4278, + 4277, + 4284, + 4283, + 4288, + 4287, + 4293, + 4290, + 4289, + 4294, + 4296, + 4295, + 4299, + 4301, + 4300, + 4302, + 4323, + 4326, + 4325, + 4324, + 4328, + 4327, + 4330, + 4329, + 4331, + 4332, + 4333, + 4340, + 4339, + 4338, + 4337, + 4359, + 4360, + 4361, + 4362, + 4363, + 4364, + 4365, + 4366, + 4367, + 4368, + 4371, + 4374, + 4376, + 4379, + 4380, + 4381, + 4382, + 4388, + 4390, + 4518, + 4523, + 4524, + 4525, + 4527, + 4526, + 4528, + 4529, + 4530, + 4531, + 4532, + 4535, + 4534, + 4533, + 4538, + 4537, + 4536, + 4540, + 4539, + 4541, + 4544, + 4543, + 4542, + 4548, + 4547, + 4546, + 4545, + 4549, + 4552, + 4551, + 4550, + 4553, + 4556, + 4555, + 4554, + 4557, + 4559, + 4558, + 4562, + 4561, + 4560, + 4566, + 4565, + 4564, + 4563, + 4567, + 4569, + 4568, + 4570, + 4571, + 4572, + 4573, + 4574, + 4577, + 4576, + 4575, + 4578, + 4580, + 4579, + 4581, + 4582, + 4583, + 4584, + 4585, + 4586, + 4589, + 4588, + 4587, + 4590, + 4592, + 4591, + 4593, + 4594, + 4632, + 4674, + 4673, + 4686, + 4703, + 4712, + 4713, + 4714, + 4715, + 4716, + 4717, + 4718, + 4719, + 4721, + 4720, + 4722, + 4725, + 4724, + 4723, + 4726, + 4761, + 4762, + 4821, + 4820, + 4823, + 4822, + 4842, + 4844, + 4849, + 4848, + 4855, + 4856, + 4857, + 4858, + 4893, + 4900, + 5451, + 5452 + ], + "rightForeArm": [ + 6920, + 6921, + 6922, + 6923, + 6924, + 6925, + 6926, + 6927, + 6928, + 6929, + 6930, + 6931, + 6932, + 6933, + 6934, + 6935, + 6936, + 6937, + 6938, + 6939, + 6940, + 6941, + 6942, + 6943, + 6944, + 6945, + 6946, + 6947, + 6948, + 6949, + 6950, + 6951, + 6952, + 6953, + 6954, + 6955, + 6956, + 6957, + 6958, + 6959, + 6960, + 6961, + 6962, + 6963, + 6964, + 6965, + 6966, + 6967, + 6968, + 6969, + 6970, + 6971, + 6972, + 6973, + 6974, + 6975, + 6976, + 
6977, + 6978, + 6979, + 6980, + 6981, + 6982, + 6983, + 6984, + 6985, + 6986, + 6987, + 6988, + 6989, + 6990, + 6991, + 6992, + 6995, + 6996, + 6997, + 6998, + 6999, + 7000, + 7001, + 7002, + 7003, + 7004, + 7017, + 7018, + 7021, + 7022, + 7025, + 7026, + 7029, + 7030, + 7033, + 7034, + 7031, + 7032, + 7035, + 7036, + 7037, + 7038, + 7039, + 7040, + 7059, + 7060, + 7061, + 7062, + 7063, + 7064, + 7065, + 7066, + 7067, + 7068, + 7069, + 7076, + 7073, + 7074, + 7075, + 7095, + 7096, + 7097, + 7098, + 7099, + 7100, + 7101, + 7102, + 7103, + 7104, + 7107, + 7110, + 7112, + 7115, + 7116, + 7117, + 7118, + 7124, + 7126, + 7254, + 7259, + 7260, + 7261, + 7262, + 7263, + 7264, + 7265, + 7266, + 7267, + 7268, + 7269, + 7270, + 7271, + 7274, + 7272, + 7273, + 7275, + 7276, + 7277, + 7278, + 7279, + 7280, + 7284, + 7281, + 7282, + 7283, + 7285, + 7286, + 7287, + 7288, + 7289, + 7290, + 7291, + 7292, + 7293, + 7294, + 7295, + 7296, + 7297, + 7298, + 7302, + 7299, + 7300, + 7301, + 7303, + 7304, + 7305, + 7306, + 7307, + 7308, + 7309, + 7310, + 7311, + 7312, + 7313, + 7314, + 7315, + 7316, + 7317, + 7318, + 7319, + 7320, + 7321, + 7322, + 7323, + 7324, + 7325, + 7326, + 7327, + 7328, + 7329, + 7330, + 7368, + 7409, + 7410, + 7422, + 7439, + 7449, + 7448, + 7450, + 7451, + 7453, + 7452, + 7454, + 7455, + 7457, + 7458, + 7456, + 7461, + 7459, + 7460, + 7462, + 7497, + 7498, + 7556, + 7557, + 7559, + 7558, + 7578, + 7580, + 7585, + 7584, + 7591, + 7592, + 7593, + 7594, + 7629, + 7636, + 8185, + 8186 + ], + "neck": [ + 12, + 13, + 14, + 15, + 219, + 220, + 221, + 222, + 372, + 373, + 374, + 375, + 462, + 463, + 496, + 497, + 552, + 553, + 558, + 559, + 563, + 564, + 649, + 650, + 736, + 737, + 1210, + 1211, + 1212, + 1213, + 1326, + 1359, + 1360, + 1386, + 1726, + 1727, + 1759, + 1790, + 1886, + 1898, + 1931, + 1932, + 1933, + 1934, + 1940, + 1941, + 1948, + 1949, + 2036, + 2149, + 2150, + 2151, + 2218, + 2219, + 2484, + 2531, + 2870, + 2893, + 2964, + 2976, + 3012, + 3013, + 3184, + 3185, + 3186, + 3187, + 3188, + 3189, + 3190, + 3191, + 3192, + 3193, + 3194, + 3195, + 3196, + 3197, + 3198, + 3199, + 3200, + 3201, + 3202, + 3203, + 3204, + 3205, + 3206, + 3207, + 3208, + 3209, + 3210, + 3211, + 3212, + 3213, + 3353, + 3354, + 3435, + 3436, + 3445, + 3446, + 3450, + 3452, + 3453, + 3456, + 3457, + 3458, + 3459, + 3857, + 3918, + 3919, + 3944, + 3945, + 3949, + 3950, + 3956, + 3957, + 3964, + 5518, + 5519, + 5527, + 5616, + 5617, + 5649, + 5920, + 5951, + 5952, + 5953, + 5954, + 5955, + 5956, + 5957, + 5958, + 5959, + 5960, + 5961, + 5962, + 5963, + 5964, + 5965, + 5966, + 5967, + 5968, + 5969, + 5970, + 5971, + 5972, + 5973, + 5974, + 5975, + 5976, + 6196, + 6197, + 6206, + 6207, + 6211, + 6213, + 6214, + 6217, + 6218, + 6219, + 6220, + 6608, + 6666, + 6667, + 6692, + 6693, + 6697, + 6698, + 6704, + 6705, + 6712, + 8343, + 8938, + 8940, + 8988 + ], + "rightToeBase": [ + 8461, + 8462, + 8459, + 8460, + 8465, + 8466, + 8463, + 8464, + 8469, + 8470, + 8467, + 8468, + 8473, + 8474, + 8471, + 8472, + 8477, + 8478, + 8475, + 8476, + 8479, + 8480, + 8481, + 8482, + 8483, + 8484, + 8485, + 8486, + 8487, + 8488, + 8489, + 8490, + 8491, + 8492, + 8493, + 8494, + 8495, + 8496, + 8497, + 8498, + 8499, + 8500, + 8501, + 8502, + 8505, + 8506, + 8503, + 8504, + 8509, + 8510, + 8507, + 8508, + 8512, + 8511, + 8513, + 8514, + 8517, + 8518, + 8515, + 8516, + 8520, + 8519, + 8523, + 8524, + 8521, + 8522, + 8525, + 8526, + 8529, + 8530, + 8527, + 8528, + 8532, + 8531, + 8535, + 8536, + 8533, + 8534, + 8537, + 8538, + 8539, + 
8540, + 8541, + 8542, + 8543, + 8544, + 8545, + 8546, + 8547, + 8548, + 8549, + 8550, + 8551, + 8552, + 8553, + 8554, + 8555, + 8556, + 8557, + 8558, + 8559, + 8560, + 8561, + 8562, + 8564, + 8563, + 8565, + 8566, + 8584, + 8587, + 8589, + 8591, + 8593, + 8595, + 8597, + 8598, + 8600, + 8602, + 8605, + 8606, + 8608, + 8610 + ], + "spine": [ + 3245, + 3244, + 3260, + 3263, + 3262, + 3261, + 3284, + 3287, + 3286, + 3285, + 3292, + 3295, + 3294, + 3293, + 3350, + 3351, + 3397, + 3400, + 3399, + 3398, + 3428, + 3431, + 3430, + 3429, + 3520, + 3519, + 3547, + 3546, + 3550, + 3549, + 3551, + 3554, + 3553, + 3552, + 3556, + 3555, + 3823, + 3822, + 3845, + 3844, + 3886, + 3891, + 3888, + 3887, + 3904, + 3907, + 3906, + 3905, + 3961, + 3960, + 3851, + 3852, + 3963, + 3962, + 3965, + 3968, + 3967, + 3966, + 3970, + 3978, + 3977, + 4115, + 4114, + 4116, + 4119, + 4118, + 4117, + 4120, + 4123, + 4122, + 4121, + 4125, + 4124, + 4126, + 4129, + 4128, + 4127, + 4400, + 5401, + 5402, + 5403, + 5404, + 5405, + 5407, + 5406, + 5409, + 5408, + 5410, + 5411, + 5414, + 5413, + 5412, + 5416, + 5415, + 5417, + 5418, + 5419, + 5420, + 5422, + 5421, + 5423, + 5425, + 5426, + 5429, + 5488, + 5489, + 5631, + 5630, + 5629, + 5495, + 5496, + 5699, + 5623, + 9022, + 9023, + 9024, + 9026, + 5939, + 5940, + 5941, + 5943, + 5948, + 5950, + 6007, + 6008, + 6023, + 6024, + 6025, + 6026, + 6047, + 6048, + 6049, + 6050, + 6055, + 6056, + 6057, + 6058, + 6113, + 6114, + 6158, + 6159, + 6160, + 6161, + 6189, + 6190, + 6191, + 6192, + 6280, + 6281, + 6307, + 6308, + 6310, + 6311, + 6312, + 6313, + 6314, + 6315, + 6316, + 6317, + 6579, + 6580, + 6600, + 6599, + 6636, + 6637, + 6638, + 6639, + 6652, + 6653, + 6654, + 6655, + 6708, + 6709, + 6710, + 6711, + 6713, + 6714, + 6715, + 6716, + 6718, + 6725, + 6726, + 6858, + 6859, + 6860, + 6861, + 6862, + 6863, + 6864, + 6865, + 6866, + 6867, + 6868, + 6869, + 6870, + 6871, + 6872, + 6873, + 7136, + 8135, + 8136, + 8137, + 8138, + 8139, + 8140, + 8141, + 8142, + 8143, + 8144, + 8145, + 8146, + 8147, + 8148, + 8149, + 8150, + 8151, + 8152, + 8153, + 8154, + 8155, + 8156, + 8157, + 8159, + 8160, + 8163, + 8323, + 8324, + 8325, + 8393, + 8722, + 8723, + 8724, + 8726 + ], + "leftUpLeg": [ + 3465, + 3464, + 3468, + 3467, + 3480, + 3479, + 3478, + 3477, + 3481, + 3484, + 3483, + 3482, + 3501, + 3500, + 3503, + 3502, + 3504, + 3507, + 3506, + 3505, + 3510, + 3509, + 3508, + 3511, + 3527, + 3530, + 3529, + 3528, + 3533, + 3532, + 3531, + 3535, + 3534, + 3536, + 3537, + 3538, + 3541, + 3540, + 3539, + 3542, + 3545, + 3544, + 3543, + 3563, + 3566, + 3565, + 3564, + 3574, + 3578, + 3577, + 3576, + 3575, + 3579, + 3582, + 3581, + 3580, + 3583, + 3586, + 3585, + 3584, + 3587, + 3588, + 3591, + 3590, + 3589, + 3592, + 3594, + 3593, + 3595, + 3598, + 3597, + 3596, + 3599, + 3602, + 3601, + 3600, + 3604, + 3603, + 3605, + 3608, + 3607, + 3606, + 3609, + 3610, + 3612, + 3611, + 3613, + 3614, + 3616, + 3615, + 3617, + 3620, + 3619, + 3618, + 3621, + 3624, + 3623, + 3622, + 3625, + 3628, + 3627, + 3626, + 3629, + 3632, + 3631, + 3630, + 3633, + 3636, + 3635, + 3634, + 3638, + 3637, + 3640, + 3639, + 3641, + 3642, + 3646, + 3645, + 3644, + 3643, + 3647, + 3650, + 3649, + 3648, + 3652, + 3651, + 3654, + 3653, + 3655, + 3656, + 3657, + 3658, + 3662, + 3661, + 3660, + 3659, + 3663, + 3664, + 3665, + 3667, + 3666, + 3668, + 3670, + 3669, + 3672, + 3671, + 3676, + 3675, + 3674, + 3673, + 3770, + 3773, + 3772, + 3771, + 3775, + 3774, + 3776, + 3777, + 3778, + 3780, + 3779, + 3781, + 3792, + 3795, + 3794, + 
3793, + 3798, + 3797, + 3796, + 3799, + 3800, + 3801, + 3803, + 3802, + 3808, + 3807, + 3806, + 3805, + 3818, + 3821, + 3820, + 3819, + 3860, + 3859, + 3858, + 3861, + 3864, + 3863, + 3862, + 3866, + 3865, + 3867, + 3903, + 3902, + 3914, + 3915, + 3916, + 3917, + 3958, + 3959, + 3986, + 3992, + 3991, + 3993, + 3994, + 3995, + 3996, + 3997, + 3998, + 4086, + 4085, + 4087, + 4090, + 4089, + 4091, + 4094, + 4093, + 4092, + 4096, + 4095, + 4097, + 4109, + 4111, + 4110, + 4113, + 4112, + 4131, + 4132, + 4133, + 4134, + 4144, + 4146, + 4145, + 4147, + 4148, + 4150, + 4149, + 4151, + 4152, + 4153, + 4154, + 4165, + 4166, + 5700, + 5702, + 5701, + 5703, + 5707, + 5706, + 5708, + 5709, + 5710, + 9021, + 9025 + ], + "eyeballs": [ + 9383, + 9384, + 9385, + 9386, + 9387, + 9388, + 9389, + 9390, + 9391, + 9392, + 9393, + 9394, + 9395, + 9396, + 9397, + 9398, + 9399, + 9400, + 9401, + 9402, + 9403, + 9404, + 9405, + 9406, + 9407, + 9408, + 9409, + 9410, + 9411, + 9412, + 9413, + 9414, + 9415, + 9416, + 9417, + 9418, + 9419, + 9420, + 9421, + 9422, + 9423, + 9424, + 9425, + 9426, + 9427, + 9428, + 9429, + 9430, + 9431, + 9432, + 9433, + 9434, + 9435, + 9436, + 9437, + 9438, + 9439, + 9440, + 9441, + 9442, + 9443, + 9444, + 9445, + 9446, + 9447, + 9448, + 9449, + 9450, + 9451, + 9452, + 9453, + 9454, + 9455, + 9456, + 9457, + 9458, + 9459, + 9460, + 9461, + 9462, + 9463, + 9464, + 9465, + 9466, + 9467, + 9468, + 9469, + 9470, + 9471, + 9472, + 9473, + 9474, + 9475, + 9476, + 9477, + 9478, + 9479, + 9480, + 9481, + 9482, + 9483, + 9484, + 9485, + 9486, + 9487, + 9488, + 9489, + 9490, + 9491, + 9492, + 9493, + 9494, + 9495, + 9496, + 9497, + 9498, + 9499, + 9500, + 9501, + 9502, + 9503, + 9504, + 9505, + 9506, + 9507, + 9508, + 9509, + 9510, + 9511, + 9512, + 9513, + 9514, + 9515, + 9516, + 9518, + 9519, + 9520, + 9521, + 9522, + 9523, + 9524, + 9525, + 9526, + 9527, + 9528, + 9529, + 9531, + 9532, + 9533, + 9534, + 9535, + 9536, + 9537, + 9538, + 9539, + 9540, + 9541, + 9542, + 9544, + 9545, + 9546, + 9547, + 9548, + 9549, + 9550, + 9551, + 9552, + 9553, + 9554, + 9555, + 9557, + 9558, + 9559, + 9560, + 9561, + 9562, + 9563, + 9564, + 9565, + 9566, + 9567, + 9568, + 9570, + 9571, + 9572, + 9573, + 9574, + 9575, + 9576, + 9577, + 9578, + 9579, + 9580, + 9581, + 9583, + 9584, + 9585, + 9586, + 9587, + 9588, + 9589, + 9590, + 9591, + 9592, + 9593, + 9594, + 9596, + 9597, + 9598, + 9599, + 9600, + 9601, + 9602, + 9603, + 9604, + 9605, + 9606, + 9607, + 9609, + 9610, + 9611, + 9612, + 9613, + 9614, + 9615, + 9616, + 9617, + 9618, + 9619, + 9620, + 9622, + 9623, + 9624, + 9625, + 9626, + 9627, + 9628, + 9629, + 9630, + 9631, + 9632, + 9633, + 9635, + 9636, + 9637, + 9638, + 9639, + 9640, + 9641, + 9642, + 9643, + 9644, + 9645, + 9646, + 9648, + 9649, + 9650, + 9651, + 9652, + 9653, + 9654, + 9655, + 9656, + 9657, + 9658, + 9659, + 9661, + 9662, + 9663, + 9664, + 9665, + 9666, + 9667, + 9668, + 9669, + 9670, + 9671, + 9672, + 9674, + 9675, + 9676, + 9677, + 9678, + 9679, + 9680, + 9681, + 9682, + 9683, + 9684, + 9685, + 9687, + 9688, + 9689, + 9690, + 9691, + 9692, + 9693, + 9694, + 9695, + 9696, + 9697, + 9698, + 9700, + 9701, + 9702, + 9703, + 9704, + 9705, + 9706, + 9707, + 9708, + 9709, + 9710, + 9711, + 9713, + 9714, + 9715, + 9716, + 9717, + 9718, + 9719, + 9720, + 9721, + 9722, + 9723, + 9724, + 9726, + 9727, + 9728, + 9729, + 9730, + 9731, + 9732, + 9733, + 9734, + 9735, + 9736, + 9737, + 9739, + 9740, + 9741, + 9742, + 9743, + 9744, + 9745, + 9746, + 9747, + 9748, + 9749, + 9750, + 9752, + 9753, + 9754, 
+ 9755, + 9756, + 9757, + 9758, + 9759, + 9760, + 9761, + 9762, + 9763, + 9765, + 9766, + 9767, + 9768, + 9769, + 9770, + 9771, + 9772, + 9773, + 9774, + 9775, + 9776, + 9778, + 9779, + 9780, + 9781, + 9782, + 9783, + 9784, + 9785, + 9786, + 9787, + 9788, + 9789, + 9791, + 9792, + 9793, + 9794, + 9795, + 9796, + 9797, + 9798, + 9799, + 9800, + 9801, + 9802, + 9803, + 9805, + 9806, + 9807, + 9808, + 9809, + 9810, + 9811, + 9812, + 9813, + 9814, + 9815, + 9816, + 9818, + 9819, + 9820, + 9821, + 9822, + 9823, + 9824, + 9825, + 9826, + 9827, + 9828, + 9829, + 9831, + 9832, + 9833, + 9834, + 9835, + 9836, + 9837, + 9838, + 9839, + 9840, + 9841, + 9842, + 9844, + 9845, + 9846, + 9847, + 9848, + 9849, + 9850, + 9851, + 9852, + 9853, + 9854, + 9855, + 9857, + 9858, + 9859, + 9860, + 9861, + 9862, + 9863, + 9864, + 9865, + 9866, + 9867, + 9868, + 9870, + 9871, + 9872, + 9873, + 9874, + 9875, + 9876, + 9877, + 9878, + 9879, + 9880, + 9881, + 9883, + 9884, + 9885, + 9886, + 9887, + 9888, + 9889, + 9890, + 9891, + 9892, + 9893, + 9894, + 9896, + 9897, + 9898, + 9899, + 9900, + 9901, + 9902, + 9903, + 9904, + 9905, + 9906, + 9907, + 9909, + 9910, + 9911, + 9912, + 9913, + 9914, + 9915, + 9916, + 9917, + 9918, + 9919, + 9920, + 9922, + 9923, + 9924, + 9925, + 9926, + 9927, + 9928, + 9929, + 9930, + 9931, + 9932, + 9933, + 9934, + 9935, + 9936, + 9937, + 9938, + 9939, + 9940, + 9941, + 9942, + 9943, + 9944, + 9945, + 9946, + 9947, + 9948, + 9949, + 9950, + 9951, + 9952, + 9953, + 9954, + 9955, + 9956, + 9957, + 9958, + 9959, + 9960, + 9961, + 9962, + 9963, + 9964, + 9965, + 9966, + 9967, + 9968, + 9969, + 9970, + 9971, + 9972, + 9973, + 9974, + 9975, + 9976, + 9977, + 9978, + 9979, + 9980, + 9981, + 9982, + 9983, + 9984, + 9985, + 9986, + 9987, + 9988, + 9989, + 9990, + 9991, + 9992, + 9993, + 9994, + 9995, + 9996, + 9997, + 9998, + 9999, + 10000, + 10001, + 10002, + 10003, + 10004, + 10005, + 10006, + 10007, + 10008, + 10009, + 10010, + 10011, + 10012, + 10013, + 10014, + 10015, + 10016, + 10017, + 10018, + 10019, + 10020, + 10021, + 10022, + 10023, + 10024, + 10025, + 10026, + 10027, + 10028, + 10029, + 10030, + 10031, + 10032, + 10033, + 10034, + 10035, + 10036, + 10037, + 10038, + 10039, + 10040, + 10041, + 10042, + 10043, + 10044, + 10045, + 10046, + 10047, + 10048, + 10049, + 10050, + 10051, + 10052, + 10053, + 10054, + 10055, + 10056, + 10057, + 10058, + 10059, + 10060, + 10061, + 10062, + 10064, + 10065, + 10066, + 10067, + 10068, + 10069, + 10070, + 10071, + 10072, + 10073, + 10074, + 10075, + 10077, + 10078, + 10079, + 10080, + 10081, + 10082, + 10083, + 10084, + 10085, + 10086, + 10087, + 10088, + 10090, + 10091, + 10092, + 10093, + 10094, + 10095, + 10096, + 10097, + 10098, + 10099, + 10100, + 10101, + 10103, + 10104, + 10105, + 10106, + 10107, + 10108, + 10109, + 10110, + 10111, + 10112, + 10113, + 10114, + 10116, + 10117, + 10118, + 10119, + 10120, + 10121, + 10122, + 10123, + 10124, + 10125, + 10126, + 10127, + 10129, + 10130, + 10131, + 10132, + 10133, + 10134, + 10135, + 10136, + 10137, + 10138, + 10139, + 10140, + 10142, + 10143, + 10144, + 10145, + 10146, + 10147, + 10148, + 10149, + 10150, + 10151, + 10152, + 10153, + 10155, + 10156, + 10157, + 10158, + 10159, + 10160, + 10161, + 10162, + 10163, + 10164, + 10165, + 10166, + 10168, + 10169, + 10170, + 10171, + 10172, + 10173, + 10174, + 10175, + 10176, + 10177, + 10178, + 10179, + 10181, + 10182, + 10183, + 10184, + 10185, + 10186, + 10187, + 10188, + 10189, + 10190, + 10191, + 10192, + 10194, + 10195, + 10196, + 10197, + 10198, + 
10199, + 10200, + 10201, + 10202, + 10203, + 10204, + 10205, + 10207, + 10208, + 10209, + 10210, + 10211, + 10212, + 10213, + 10214, + 10215, + 10216, + 10217, + 10218, + 10220, + 10221, + 10222, + 10223, + 10224, + 10225, + 10226, + 10227, + 10228, + 10229, + 10230, + 10231, + 10233, + 10234, + 10235, + 10236, + 10237, + 10238, + 10239, + 10240, + 10241, + 10242, + 10243, + 10244, + 10246, + 10247, + 10248, + 10249, + 10250, + 10251, + 10252, + 10253, + 10254, + 10255, + 10256, + 10257, + 10259, + 10260, + 10261, + 10262, + 10263, + 10264, + 10265, + 10266, + 10267, + 10268, + 10269, + 10270, + 10272, + 10273, + 10274, + 10275, + 10276, + 10277, + 10278, + 10279, + 10280, + 10281, + 10282, + 10283, + 10285, + 10286, + 10287, + 10288, + 10289, + 10290, + 10291, + 10292, + 10293, + 10294, + 10295, + 10296, + 10298, + 10299, + 10300, + 10301, + 10302, + 10303, + 10304, + 10305, + 10306, + 10307, + 10308, + 10309, + 10311, + 10312, + 10313, + 10314, + 10315, + 10316, + 10317, + 10318, + 10319, + 10320, + 10321, + 10322, + 10324, + 10325, + 10326, + 10327, + 10328, + 10329, + 10330, + 10331, + 10332, + 10333, + 10334, + 10335, + 10337, + 10338, + 10339, + 10340, + 10341, + 10342, + 10343, + 10344, + 10345, + 10346, + 10347, + 10348, + 10349, + 10351, + 10352, + 10353, + 10354, + 10355, + 10356, + 10357, + 10358, + 10359, + 10360, + 10361, + 10362, + 10364, + 10365, + 10366, + 10367, + 10368, + 10369, + 10370, + 10371, + 10372, + 10373, + 10374, + 10375, + 10377, + 10378, + 10379, + 10380, + 10381, + 10382, + 10383, + 10384, + 10385, + 10386, + 10387, + 10388, + 10390, + 10391, + 10392, + 10393, + 10394, + 10395, + 10396, + 10397, + 10398, + 10399, + 10400, + 10401, + 10403, + 10404, + 10405, + 10406, + 10407, + 10408, + 10409, + 10410, + 10411, + 10412, + 10413, + 10414, + 10416, + 10417, + 10418, + 10419, + 10420, + 10421, + 10422, + 10423, + 10424, + 10425, + 10426, + 10427, + 10429, + 10430, + 10431, + 10432, + 10433, + 10434, + 10435, + 10436, + 10437, + 10438, + 10439, + 10440, + 10442, + 10443, + 10444, + 10445, + 10446, + 10447, + 10448, + 10449, + 10450, + 10451, + 10452, + 10453, + 10455, + 10456, + 10457, + 10458, + 10459, + 10460, + 10461, + 10462, + 10463, + 10464, + 10465, + 10466, + 10468, + 10469, + 10470, + 10471, + 10472, + 10473, + 10474 + ], + "leftHand": [ + 4597, + 4596, + 4595, + 4598, + 4602, + 4601, + 4600, + 4599, + 4604, + 4603, + 4606, + 4605, + 4607, + 4610, + 4609, + 4608, + 4611, + 4614, + 4613, + 4612, + 4615, + 4618, + 4617, + 4616, + 4621, + 4620, + 4619, + 4622, + 4625, + 4624, + 4623, + 4626, + 4629, + 4628, + 4627, + 4630, + 4634, + 4633, + 4632, + 4631, + 4637, + 4636, + 4635, + 4638, + 4640, + 4639, + 4647, + 4646, + 4645, + 4648, + 4649, + 4650, + 4655, + 4656, + 4659, + 4658, + 4657, + 4660, + 4663, + 4662, + 4661, + 4664, + 4667, + 4666, + 4665, + 4668, + 4671, + 4670, + 4669, + 4672, + 4674, + 4673, + 4677, + 4676, + 4675, + 4678, + 4679, + 4680, + 4684, + 4683, + 4685, + 4687, + 4686, + 4688, + 4689, + 4690, + 4693, + 4692, + 4691, + 4694, + 4697, + 4696, + 4695, + 4698, + 4699, + 4700, + 4702, + 4701, + 4703, + 4706, + 4705, + 4704, + 4707, + 4708, + 4710, + 4709, + 4711, + 4712, + 4713, + 4714, + 4715, + 4720, + 4723, + 4729, + 4728, + 4727, + 4730, + 4732, + 4731, + 4733, + 4735, + 4734, + 4736, + 4743, + 4744, + 4746, + 4745, + 4749, + 4748, + 4747, + 4751, + 4750, + 4752, + 4755, + 4754, + 4753, + 4756, + 4758, + 4757, + 4763, + 4764, + 4765, + 4768, + 4769, + 4776, + 4777, + 4778, + 4784, + 4786, + 4785, + 4789, + 4788, + 4787, + 4790, + 4792, + 
4791, + 4793, + 4794, + 4796, + 4799, + 4798, + 4797, + 4804, + 4803, + 4805, + 4806, + 4807, + 4808, + 4810, + 4809, + 4811, + 4812, + 4813, + 4814, + 4815, + 4816, + 4817, + 4820, + 4822, + 4825, + 4824, + 4827, + 4826, + 4828, + 4835, + 4836, + 4837, + 4839, + 4838, + 4841, + 4840, + 4842, + 4843, + 4845, + 4849, + 4850, + 4851, + 4852, + 4853, + 4854, + 4861, + 4860, + 4862, + 4863, + 4866, + 4865, + 4864, + 4867, + 4868, + 4869, + 4871, + 4870, + 4872, + 4874, + 4873, + 4876, + 4877, + 4879, + 4878, + 4881, + 4880, + 4882, + 4883, + 4884, + 4885, + 4888, + 4889, + 4891, + 4890, + 4892, + 4893, + 4898, + 4899, + 4902, + 4901, + 4903, + 4904, + 4907, + 5211, + 5212, + 5221, + 5222, + 5311, + 5313, + 5312, + 5315, + 5314, + 5316, + 5318, + 5317, + 5319, + 5320, + 5321, + 5323, + 5322, + 5324, + 5325, + 5326, + 5327, + 5328, + 5329, + 5330, + 5331, + 5334, + 5333, + 5332, + 5335, + 5336, + 5337, + 5338, + 5339, + 5341, + 5340, + 5342, + 5344, + 5343, + 5345, + 5346, + 5347, + 5348, + 5351, + 5352, + 5354, + 5353, + 5355, + 5357, + 5356, + 5359, + 5358, + 5360, + 5362, + 5361, + 5363, + 5365, + 5364, + 5366, + 5367, + 5368, + 5369, + 5370, + 5371, + 5372, + 5373, + 5374, + 5375, + 5376, + 5377, + 5379, + 5378, + 5380, + 5381, + 5382, + 5385, + 5384, + 5383, + 5386, + 5387, + 5388, + 5389, + 5390, + 5391, + 5392, + 5393, + 5394 + ], + "hips": [ + 3263, + 3262, + 3284, + 3285, + 3292, + 3293, + 3306, + 3307, + 3308, + 3309, + 3335, + 3350, + 3428, + 3429, + 3432, + 3433, + 3434, + 3439, + 3442, + 3441, + 3440, + 3448, + 3447, + 3455, + 3454, + 3461, + 3460, + 3462, + 3465, + 3464, + 3463, + 3466, + 3469, + 3468, + 3467, + 3473, + 3472, + 3471, + 3470, + 3475, + 3474, + 3476, + 3485, + 3487, + 3486, + 3488, + 3491, + 3490, + 3489, + 3492, + 3495, + 3494, + 3493, + 3496, + 3499, + 3498, + 3497, + 3500, + 3510, + 3511, + 3515, + 3514, + 3513, + 3512, + 3517, + 3516, + 3518, + 3520, + 3519, + 3542, + 3543, + 3547, + 3546, + 3550, + 3549, + 3548, + 3562, + 3567, + 3568, + 3569, + 3798, + 3799, + 3839, + 3842, + 3841, + 3840, + 3843, + 3734, + 3735, + 3736, + 3885, + 3884, + 3903, + 3902, + 3916, + 3917, + 3804, + 3958, + 3970, + 3969, + 3972, + 3971, + 3879, + 3986, + 3993, + 3994, + 3889, + 3890, + 4081, + 4080, + 4083, + 4082, + 4084, + 4088, + 4002, + 4130, + 4144, + 4145, + 4147, + 4041, + 4165, + 4166, + 4065, + 4066, + 4292, + 4291, + 4401, + 4404, + 4403, + 4402, + 4297, + 4298, + 4405, + 4409, + 4408, + 4407, + 4406, + 4410, + 4412, + 4411, + 4414, + 4413, + 4416, + 4415, + 4417, + 4418, + 4419, + 4421, + 4420, + 4423, + 4422, + 4425, + 4424, + 4320, + 4321, + 5497, + 5492, + 5491, + 5490, + 5502, + 5498, + 5504, + 5503, + 5506, + 5505, + 5507, + 5509, + 5508, + 5511, + 5510, + 5512, + 5513, + 5515, + 5514, + 5520, + 5517, + 5516, + 5631, + 5630, + 5493, + 5494, + 5658, + 5659, + 5661, + 5660, + 5663, + 5662, + 5664, + 5665, + 5667, + 5666, + 5668, + 5669, + 5672, + 5671, + 5670, + 5673, + 5674, + 5677, + 5676, + 5675, + 5679, + 5678, + 5557, + 5680, + 5682, + 5681, + 5683, + 5686, + 5685, + 5684, + 5688, + 5687, + 5689, + 5690, + 5691, + 5692, + 5694, + 5693, + 5695, + 5574, + 5575, + 5696, + 5698, + 5697, + 5699, + 5703, + 5704, + 5705, + 5711, + 5712, + 5713, + 5714, + 5715, + 5596, + 5716, + 5718, + 5717, + 5600, + 5601, + 5719, + 5603, + 5604, + 5721, + 5720, + 5723, + 5722, + 5724, + 5725, + 5727, + 5726, + 5613, + 5614, + 5615, + 5620, + 5622, + 5931, + 5934, + 5939, + 5941, + 5942, + 5946, + 5949, + 6025, + 6026, + 6047, + 6048, + 6055, + 6056, + 6069, + 6070, + 6071, + 6072, + 
6098, + 6113, + 6189, + 6190, + 6193, + 6194, + 6195, + 6200, + 6201, + 6202, + 6203, + 6208, + 6209, + 6215, + 6216, + 6221, + 6222, + 6223, + 6224, + 6225, + 6226, + 6227, + 6228, + 6229, + 6230, + 6234, + 6231, + 6232, + 6233, + 6235, + 6236, + 6237, + 6246, + 6247, + 6248, + 6249, + 6250, + 6251, + 6252, + 6253, + 6254, + 6255, + 6256, + 6257, + 6258, + 6259, + 6260, + 6261, + 6271, + 6272, + 6276, + 6273, + 6274, + 6275, + 6277, + 6278, + 6279, + 6280, + 6281, + 6303, + 6304, + 6307, + 6308, + 6309, + 6310, + 6311, + 6323, + 6328, + 6329, + 6330, + 6556, + 6557, + 6594, + 6597, + 6595, + 6596, + 6598, + 6634, + 6635, + 6650, + 6651, + 6664, + 6665, + 6706, + 6718, + 6717, + 6720, + 6719, + 6734, + 6741, + 6742, + 6824, + 6825, + 6826, + 6827, + 6828, + 6832, + 6874, + 6888, + 6889, + 6891, + 6909, + 6910, + 7137, + 7138, + 7139, + 7140, + 7141, + 7145, + 7142, + 7143, + 7144, + 7146, + 7147, + 7148, + 7149, + 7150, + 7151, + 7152, + 7153, + 7154, + 7155, + 7156, + 7157, + 7158, + 7159, + 7160, + 7161, + 8222, + 8219, + 8220, + 8221, + 8223, + 8224, + 8225, + 8226, + 8227, + 8228, + 8229, + 8230, + 8231, + 8232, + 8233, + 8234, + 8235, + 8236, + 8237, + 8238, + 8239, + 8240, + 8324, + 8325, + 8352, + 8353, + 8354, + 8355, + 8356, + 8357, + 8358, + 8359, + 8360, + 8361, + 8362, + 8363, + 8366, + 8364, + 8365, + 8367, + 8368, + 8371, + 8369, + 8370, + 8373, + 8372, + 8374, + 8375, + 8376, + 8377, + 8378, + 8379, + 8380, + 8381, + 8382, + 8383, + 8384, + 8385, + 8386, + 8387, + 8388, + 8389, + 8390, + 8391, + 8392, + 8393, + 8397, + 8398, + 8399, + 8405, + 8406, + 8407, + 8408, + 8409, + 8410, + 8411, + 8412, + 8413, + 8414, + 8415, + 8416, + 8417, + 8418, + 8419, + 8420, + 8421 + ] +} diff --git a/body_models/human_model_files/smplx/smplx_vert_segmentation.npy b/body_models/human_model_files/smplx/smplx_vert_segmentation.npy new file mode 100644 index 0000000000000000000000000000000000000000..a3e76430f7478e4996e331732cd6cb0e3bc2c22f --- /dev/null +++ b/body_models/human_model_files/smplx/smplx_vert_segmentation.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0710425b2b1e787596ab2fb10b6e622e11a79329381e4ee3f7eb03214d47c18a +size 83928 diff --git a/body_models/human_model_files/smplx/version.txt b/body_models/human_model_files/smplx/version.txt new file mode 100644 index 0000000000000000000000000000000000000000..ac6ab56466944d45e30add02cd7607b768a66b5a --- /dev/null +++ b/body_models/human_model_files/smplx/version.txt @@ -0,0 +1 @@ +This is "Version 1.0" of SMPL-X. 
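The vertex-index tables above map SMPL-X body-part names ("leftHand", "hips", ...) to mesh vertex IDs; an SMPL-X mesh has 10475 vertices, so valid indices run 0-10474. A minimal sketch of how such a segmentation is typically consumed, assuming the mapping is available as JSON (the exact source filename is truncated before this excerpt, and `verts` is an illustrative SMPL-X output, not code from this repo):

import json
import numpy as np

# Load the part-name -> vertex-index mapping (path is an assumption).
with open("body_models/human_model_files/smplx/smplx_vert_segmentation.json") as f:
    seg = json.load(f)

hips_idx = np.asarray(seg["hips"], dtype=np.int64)      # vertex IDs of the hips
hand_idx = np.asarray(seg["leftHand"], dtype=np.int64)  # vertex IDs of the left hand

# Example use: given verts of shape (10475, 3) from an SMPL-X forward pass,
# slice out one body part, e.g. to mask or color it:
# hips_verts = verts[hips_idx]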
diff --git a/environment.yaml b/environment.yaml new file mode 100644 index 0000000000000000000000000000000000000000..93b497dad035a07cfd7d2dfa3febec97ee374356 --- /dev/null +++ b/environment.yaml @@ -0,0 +1,258 @@ +name: mgpt +channels: + - pytorch + - conda-forge + - defaults + - https://repo.anaconda.com/pkgs/main + - https://repo.anaconda.com/pkgs/r +dependencies: + - _libgcc_mutex=0.1=main + - _openmp_mutex=4.5=1_gnu + - asttokens=3.0.0=pyhd8ed1ab_0 + - backcall=0.2.0=pyh9f0ad1d_0 + - blas=1.0=mkl + - bzip2=1.0.8=h7b6447c_0 + - ca-certificates=2025.1.31=hbcca054_0 + - certifi=2024.8.30=pyhd8ed1ab_0 + - comm=0.2.2=pyhd8ed1ab_0 + - cudatoolkit=10.1.243=h6bb024c_0 + - debugpy=1.4.1=py38h709712a_0 + - entrypoints=0.4=pyhd8ed1ab_0 + - executing=2.1.0=pyhd8ed1ab_0 + - ffmpeg=4.3=hf484d3e_0 + - freetype=2.10.4=h5ab3b9f_0 + - gmp=6.2.1=h2531618_2 + - gnutls=3.6.15=he1e5248_0 + - intel-openmp=2021.3.0=h06a4308_3350 + - ipykernel=6.20.2=pyh210e3f2_0 + - jpeg=9b=h024ee3a_2 + - jupyter_client=7.1.2=pyhd8ed1ab_0 + - jupyter_core=5.7.2=pyh31011fe_1 + - lame=3.100=h7b6447c_0 + - lcms2=2.12=h3be6417_0 + - ld_impl_linux-64=2.35.1=h7274673_9 + - libffi=3.3=he6710b0_2 + - libgcc-ng=9.3.0=h5101ec6_17 + - libgomp=9.3.0=h5101ec6_17 + - libiconv=1.15=h63c8f33_5 + - libidn2=2.3.2=h7f8727e_0 + - libpng=1.6.37=hbc83047_0 + - libsodium=1.0.18=h36c2ea0_1 + - libstdcxx-ng=13.2.0=hc0a3c3a_7 + - libtasn1=4.16.0=h27cfd23_0 + - libtiff=4.2.0=h85742a9_0 + - libunistring=0.9.10=h27cfd23_0 + - libuv=1.40.0=h7b6447c_0 + - libwebp-base=1.2.0=h27cfd23_0 + - lz4-c=1.9.3=h295c915_1 + - mkl=2021.3.0=h06a4308_520 + - mkl-service=2.4.0=py38h7f8727e_0 + - mkl_fft=1.3.0=py38h42c9631_2 + - mkl_random=1.2.2=py38h51133e4_0 + - ncurses=6.2=he6710b0_1 + - nest-asyncio=1.6.0=pyhd8ed1ab_0 + - nettle=3.7.3=hbbd107a_1 + - ninja=1.10.2=hff7bd54_1 + - olefile=0.46=py_0 + - openh264=2.1.0=hd408876_0 + - openjpeg=2.3.0=h05c96fa_1 + - openssl=1.1.1k=h7f98852_0 + - packaging=24.2=pyhd8ed1ab_2 + - pickleshare=0.7.5=py_1003 + - pillow=8.3.1=py38h2c7a002_0 + - pip=21.0.1=py38h06a4308_0 + - platformdirs=4.3.6=pyhd8ed1ab_0 + - prompt_toolkit=3.0.48=hd8ed1ab_1 + - ptyprocess=0.7.0=pyhd3deb0d_0 + - pure_eval=0.2.3=pyhd8ed1ab_0 + - pygments=2.18.0=pyhd8ed1ab_0 + - python=3.8.11=h12debd9_0_cpython + - python_abi=3.8=5_cp38 + - pyzmq=22.1.0=py38h2035c66_0 + - readline=8.1=h27cfd23_0 + - setuptools=52.0.0=py38h06a4308_0 + - six=1.16.0=pyhd3eb1b0_0 + - sqlite=3.36.0=hc218d9a_0 + - stack_data=0.6.2=pyhd8ed1ab_0 + - tk=8.6.10=hbc83047_0 + - torchaudio=0.8.1=py38 + - torchvision=0.9.1=py38_cu101 + - tornado=6.1=py38h497a2fe_1 + - wheel=0.37.0=pyhd3eb1b0_0 + - xz=5.2.5=h7b6447c_0 + - zeromq=4.3.4=h9c3ff4c_0 + - zlib=1.2.11=h7b6447c_3 + - zstd=1.4.9=haebb681_0 + - pip: + - absl-py==0.13.0 + - accelerate==1.0.1 + - aiohappyeyeballs==2.4.3 + - aiohttp==3.10.11 + - aiosignal==1.3.1 + - annotated-types==0.7.0 + - antlr4-python3-runtime==4.9.3 + - async-timeout==5.0.1 + - attrs==24.2.0 + - beautifulsoup4==4.12.3 + - blis==0.7.11 + - cachetools==4.2.2 + - catalogue==2.0.10 + - charset-normalizer==2.0.4 + - chumpy==0.70 + - click==8.1.7 + - clip==1.0 + - cloudpathlib==0.20.0 + - confection==0.1.5 + - cycler==0.10.0 + - cymem==2.0.10 + - decorator==5.0.9 + - diffusers==0.31.0 + - einops==0.8.0 + - ffmpeg-python==0.2.0 + - filelock==3.16.1 + - freetype-py==2.5.1 + - frozenlist==1.5.0 + - fsspec==2024.2.0 + - ftfy==6.1.1 + - future==1.0.0 + - fvcore==0.1.5.post20221221 + - gdown==5.2.0 + - glfw==2.8.0 + - google-auth==2.36.0 + - google-auth-oauthlib==0.4.6 + - 
grpcio==1.68.0 + - h5py==3.11.0 + - huggingface-hub==0.26.2 + - human-body-prior==2.2.2.0 + - idna==3.2 + - imageio==2.9.0 + - imageio-ffmpeg==0.5.1 + - importlib-metadata==8.5.0 + - iopath==0.1.10 + - ipdb==0.13.9 + - ipython==7.26.0 + - ipython-genutils==0.2.0 + - jedi==0.18.0 + - jinja2==3.1.3 + - joblib==1.0.1 + - kiwisolver==1.3.1 + - langcodes==3.4.1 + - language-data==1.3.0 + - lightning-utilities==0.11.9 + - marisa-trie==1.2.1 + - markdown==3.3.4 + - markdown-it-py==3.0.0 + - markupsafe==2.1.5 + - matplotlib==3.4.3 + - matplotlib-inline==0.1.2 + - mdurl==0.1.2 + - moviepy==0.2.3.1 + - mpmath==1.3.0 + - multidict==6.1.0 + - murmurhash==1.0.11 + - natsort==8.4.0 + - networkx==3.0 + - numpy==1.22.4 + - nvidia-cublas-cu11==11.11.3.6 + - nvidia-cublas-cu12==12.1.3.1 + - nvidia-cuda-cupti-cu11==11.8.87 + - nvidia-cuda-cupti-cu12==12.1.105 + - nvidia-cuda-nvrtc-cu11==11.8.89 + - nvidia-cuda-nvrtc-cu12==12.1.105 + - nvidia-cuda-runtime-cu11==11.8.89 + - nvidia-cuda-runtime-cu12==12.1.105 + - nvidia-cudnn-cu11==9.1.0.70 + - nvidia-cudnn-cu12==9.1.0.70 + - nvidia-cufft-cu11==10.9.0.58 + - nvidia-cufft-cu12==11.0.2.54 + - nvidia-curand-cu11==10.3.0.86 + - nvidia-curand-cu12==10.3.2.106 + - nvidia-cusolver-cu11==11.4.1.48 + - nvidia-cusolver-cu12==11.4.5.107 + - nvidia-cusparse-cu11==11.7.5.86 + - nvidia-cusparse-cu12==12.1.0.106 + - nvidia-nccl-cu11==2.20.5 + - nvidia-nccl-cu12==2.20.5 + - nvidia-nvjitlink-cu12==12.1.105 + - nvidia-nvtx-cu11==11.8.86 + - nvidia-nvtx-cu12==12.1.105 + - oauthlib==3.1.1 + - omegaconf==2.3.0 + - orjson==3.10.15 + - pandas==1.3.2 + - parso==0.8.2 + - pexpect==4.8.0 + - portalocker==3.0.0 + - preshed==3.0.9 + - prompt-toolkit==3.0.20 + - propcache==0.2.0 + - protobuf==5.28.3 + - psutil==6.1.0 + - pyasn1==0.4.8 + - pyasn1-modules==0.2.8 + - pydantic==2.10.1 + - pydantic-core==2.27.1 + - pydeprecate==0.3.2 + - pygame==2.6.1 + - pyglet==2.1.2 + - pyopengl==3.1.0 + - pyparsing==2.4.7 + - pyrender==0.1.45 + - pysocks==1.7.1 + - python-dateutil==2.8.2 + - pytorch-lightning==1.7.0 + - pytorch3d==0.3.0 + - pytz==2021.1 + - pyyaml==5.4.1 + - regex==2024.11.6 + - requests==2.26.0 + - requests-oauthlib==1.3.0 + - rich==13.9.4 + - rsa==4.7.2 + - safetensors==0.4.5 + - scikit-learn==0.24.2 + - scipy==1.7.1 + - sentence-transformers==3.2.1 + - sentencepiece==0.2.0 + - shapely==2.0.7 + - shellingham==1.5.4 + - sklearn==0.0 + - smart-open==7.0.5 + - smplx==0.1.28 + - soupsieve==2.6 + - spacy==3.7.5 + - spacy-legacy==3.0.12 + - spacy-loggers==1.0.5 + - srsly==2.4.8 + - sympy==1.13.1 + - tabulate==0.9.0 + - tensorboard==2.12.0 + - tensorboard-data-server==0.7.2 + - tensorboard-plugin-wit==1.8.0 + - termcolor==2.4.0 + - thinc==8.2.5 + - threadpoolctl==2.2.0 + - timm==1.0.12 + - tokenizers==0.20.3 + - toml==0.10.2 + - torch==2.4.1+cu118 + - torchgeometry==0.1.2 + - torchmetrics==0.7.0 + - tqdm==4.62.2 + - traitlets==5.0.5 + - transformers==4.46.3 + - triangle==20250106 + - trimesh==4.6.2 + - triton==3.0.0 + - typer==0.13.1 + - typing-extensions==4.12.2 + - urllib3==1.26.6 + - wasabi==1.1.3 + - wcwidth==0.2.5 + - weasel==0.4.1 + - werkzeug==2.0.1 + - wrapt==1.17.0 + - yacs==0.1.8 + - yarl==1.15.2 + - zipp==3.20.2 +prefix: /root/miniconda3/envs/mgpt \ No newline at end of file diff --git a/eval_causal_TAE.py b/eval_causal_TAE.py new file mode 100644 index 0000000000000000000000000000000000000000..e08baf784cb449d3bc9d4f27ca369ce928bd5cd0 --- /dev/null +++ b/eval_causal_TAE.py @@ -0,0 +1,101 @@ +import os +import torch +import numpy as np +from torch.utils.tensorboard import SummaryWriter 
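+# NOTE: this script is launched from the repo root and chdirs into Evaluator_272/
+# right after the imports; dataset paths such as './humanml3d_272' are then resolved
+# through the symlinks that EVAL_causal_TAE.sh creates inside Evaluator_272/.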
+import json +import models.tae as tae +import options.option_tae as option_tae +import utils.utils_model as utils_model +import utils.eval_trans as eval_trans +from humanml3d_272 import dataset_eval_tae +import sys +import warnings +warnings.filterwarnings('ignore') + +os.chdir('Evaluator_272') +sys.path.insert(0, os.getcwd()) + + +comp_device = torch.device('cuda') + +##### ---- Exp dirs ---- ##### +args = option_tae.get_args_parser() +torch.manual_seed(args.seed) + +args.out_dir = os.path.join(args.out_dir, f'{args.exp_name}') +os.makedirs(args.out_dir, exist_ok = True) + +##### ---- Logger ---- ##### +logger = utils_model.get_logger(args.out_dir) +writer = SummaryWriter(args.out_dir) +logger.info(json.dumps(vars(args), indent=4, sort_keys=True)) + +val_loader = dataset_eval_tae.DATALoader(args.dataname, True, 32) + +##### ---- Network ---- ##### +clip_range = [-30,20] + +net = tae.Causal_HumanTAE( + hidden_size=args.hidden_size, + down_t=args.down_t, + stride_t=args.stride_t, + depth=args.depth, + dilation_growth_rate=args.dilation_growth_rate, + activation='relu', + latent_dim=args.latent_dim, + clip_range=clip_range + ) + + +print ('loading checkpoint from {}'.format(args.resume_pth)) +ckpt = torch.load(args.resume_pth, map_location='cpu') +net.load_state_dict(ckpt['net'], strict=True) +net.eval() +net.to(comp_device) + + +# load evaluator:-------------------------------- +import torch +from mld.models.architectures.temos.textencoder.distillbert_actor import DistilbertActorAgnosticEncoder +from mld.models.architectures.temos.motionencoder.actor import ActorAgnosticEncoder + +modelpath = 'distilbert-base-uncased' + +textencoder = DistilbertActorAgnosticEncoder(modelpath, num_layers=4, latent_dim=256) +motionencoder = ActorAgnosticEncoder(nfeats=272, vae = True, num_layers=4, latent_dim=256, max_len=300) + +ckpt = torch.load('epoch=99.ckpt') + +# load textencoder +textencoder_ckpt = {} +for k, v in ckpt['state_dict'].items(): + if k.split(".")[0] == "textencoder": + name = k.replace("textencoder.", "") + textencoder_ckpt[name] = v +textencoder.load_state_dict(textencoder_ckpt, strict=True) +textencoder.eval() +textencoder.to(comp_device) + +# load motionencoder +motionencoder_ckpt = {} +for k, v in ckpt['state_dict'].items(): + if k.split(".")[0] == "motionencoder": + name = k.replace("motionencoder.", "") + motionencoder_ckpt[name] = v +motionencoder.load_state_dict(motionencoder_ckpt, strict=True) +motionencoder.eval() +motionencoder.to(comp_device) +#-------------------------------- + +evaluator = [textencoder, motionencoder] + +fid = [] +mpjpe = [] + +best_fid, best_mpjpe, writer, logger = eval_trans.evaluation_tae_single(args.out_dir, val_loader, net, logger, writer, evaluator=evaluator, device=comp_device) +fid.append(best_fid) +mpjpe.append(best_mpjpe) + +logger.info('final result:') +logger.info(f'fid: {fid}') +logger.info(f'mpjpe: {mpjpe} (mm)') \ No newline at end of file diff --git a/eval_gt.py b/eval_gt.py new file mode 100644 index 0000000000000000000000000000000000000000..b7ad8d8a9c3be8c29e19ef8988c2bec60fad2a0a --- /dev/null +++ b/eval_gt.py @@ -0,0 +1,62 @@ +import os +import torch +from utils import eval_trans +from humanml3d_272 import dataset_eval_tae +from options import option_transformer as option_trans +import warnings +import sys +warnings.filterwarnings('ignore') +os.chdir('Evaluator_272') +sys.path.insert(0, os.getcwd()) +comp_device = torch.device('cuda') +args = option_trans.get_args_parser() +torch.manual_seed(args.seed) +val_loader = 
dataset_eval_tae.DATALoader(args.dataname, True, 32) + + +# load evaluator:-------------------------------- +from mld.models.architectures.temos.textencoder.distillbert_actor import DistilbertActorAgnosticEncoder +from mld.models.architectures.temos.motionencoder.actor import ActorAgnosticEncoder + +modelpath = 'distilbert-base-uncased' + +textencoder = DistilbertActorAgnosticEncoder(modelpath, num_layers=4) +motionencoder = ActorAgnosticEncoder(nfeats=272, vae = True, num_layers=4, max_len=300) + +ckpt_path = 'epoch=99.ckpt' +print(f'Loading evaluator checkpoint from {ckpt_path}') +ckpt = torch.load(ckpt_path) +# load textencoder +textencoder_ckpt = {} +for k, v in ckpt['state_dict'].items(): + if k.split(".")[0] == "textencoder": + name = k.replace("textencoder.", "") + textencoder_ckpt[name] = v +textencoder.load_state_dict(textencoder_ckpt, strict=True) +textencoder.eval() +textencoder.to(comp_device) + +# load motionencoder +motionencoder_ckpt = {} +for k, v in ckpt['state_dict'].items(): + if k.split(".")[0] == "motionencoder": + name = k.replace("motionencoder.", "") + motionencoder_ckpt[name] = v +motionencoder.load_state_dict(motionencoder_ckpt, strict=True) +motionencoder.eval() +motionencoder.to(comp_device) +#-------------------------------- + +evaluator = [textencoder, motionencoder] + +gt_fid, gt_div, gt_top1, gt_top2, gt_top3, gt_matching = eval_trans.evaluation_gt(val_loader, evaluator, device=comp_device) + +print('final result:') +print(f'gt_fid: {gt_fid}') +print(f'gt_div: {gt_div}') +print(f'gt_top1: {gt_top1}') +print(f'gt_top2: {gt_top2}') +print(f'gt_top3: {gt_top3}') +print(f'gt_MM-dist (matching score): {gt_matching}') + + diff --git a/eval_t2m.py b/eval_t2m.py new file mode 100644 index 0000000000000000000000000000000000000000..892a83f10a5e479665435ea5c3260428a4ced5a5 --- /dev/null +++ b/eval_t2m.py @@ -0,0 +1,143 @@ +import os +import torch +import numpy as np +from torch.utils.tensorboard import SummaryWriter +import json +import sys +from models.llama_model import LLaMAHF, LLaMAHFConfig +import options.option_transformer as option_trans +import utils.utils_model as utils_model +import utils.eval_trans as eval_trans +from humanml3d_272 import dataset_eval_tae +import models.tae as tae +import warnings +warnings.filterwarnings('ignore') +os.environ["TOKENIZERS_PARALLELISM"] = "false" + +os.chdir('Evaluator_272') +sys.path.insert(0, os.getcwd()) + +comp_device = torch.device('cuda') + +##### ---- Exp dirs ---- ##### +args = option_trans.get_args_parser() +torch.manual_seed(args.seed) + +args.out_dir = os.path.join(args.out_dir, f'{args.exp_name}') +os.makedirs(args.out_dir, exist_ok = True) + +##### ---- Logger ---- ##### +logger = utils_model.get_logger(args.out_dir) +writer = SummaryWriter(args.out_dir) +logger.info(json.dumps(vars(args), indent=4, sort_keys=True)) +val_loader = dataset_eval_tae.DATALoader(args.dataname, True, 32) + +##### ---- Network ---- ##### +from sentence_transformers import SentenceTransformer +t5_model = SentenceTransformer('../sentencet5-xxl/') +t5_model.eval() +for p in t5_model.parameters(): + p.requires_grad = False +tokenize_model = t5_model + +# Causal TAE + +clip_range = [-30,20] + +net = tae.Causal_HumanTAE( + hidden_size=args.hidden_size, + down_t=args.down_t, + stride_t=args.stride_t, + depth=args.depth, + dilation_growth_rate=args.dilation_growth_rate, + activation='relu', + latent_dim=args.latent_dim, + clip_range=clip_range + ) + +config = LLaMAHFConfig.from_name('Normal_size') +config.block_size = 78 +trans_encoder = 
LLaMAHF(config, args.num_diffusion_head_layers, args.latent_dim, comp_device) + +print('loading checkpoint from {}'.format(args.resume_pth)) +ckpt = torch.load(args.resume_pth, map_location='cpu') +net.load_state_dict(ckpt['net'], strict=True) +net.eval() +net.to(comp_device) + + +if args.resume_trans is not None: + print('loading transformer checkpoint from {}'.format(args.resume_trans)) + ckpt = torch.load(args.resume_trans, map_location='cpu') + new_ckpt_trans = {} + for key in ckpt['trans'].keys(): + if key.split('.')[0]=='module': + new_key = '.'.join(key.split('.')[1:]) + else: + new_key = key + new_ckpt_trans[new_key] = ckpt['trans'][key] + trans_encoder.load_state_dict(new_ckpt_trans, strict=True) +trans_encoder.eval() +trans_encoder.to(comp_device) + +# load evaluator: +import torch +from transformers import AutoTokenizer, AutoModel +from mld.models.architectures.temos.textencoder.distillbert_actor import DistilbertActorAgnosticEncoder +from mld.models.architectures.temos.motionencoder.actor import ActorAgnosticEncoder +from collections import OrderedDict + +modelpath = 'distilbert-base-uncased' + +textencoder = DistilbertActorAgnosticEncoder(modelpath, num_layers=4, latent_dim=256) +motionencoder = ActorAgnosticEncoder(nfeats=272, vae = True, num_layers=4, latent_dim=256, max_len=300) + +ckpt_path = 'epoch=99.ckpt' +print(f'Loading evaluator checkpoint from {ckpt_path}') +ckpt = torch.load(ckpt_path) +# load textencoder +textencoder_ckpt = {} +for k, v in ckpt['state_dict'].items(): + if k.split(".")[0] == "textencoder": + name = k.replace("textencoder.", "") + textencoder_ckpt[name] = v +textencoder.load_state_dict(textencoder_ckpt, strict=True) +textencoder.eval() +textencoder.to(comp_device) + +# load motionencoder +motionencoder_ckpt = {} +for k, v in ckpt['state_dict'].items(): + if k.split(".")[0] == "motionencoder": + name = k.replace("motionencoder.", "") + motionencoder_ckpt[name] = v +motionencoder.load_state_dict(motionencoder_ckpt, strict=True) +motionencoder.eval() +motionencoder.to(comp_device) +#-------------------------------- + +evaluator = [textencoder, motionencoder] + +fid = [] +div = [] +top1 = [] +top2 = [] +top3 = [] +matching = [] +mpjpe = [] + +best_fid, best_div, best_top1, best_top2, best_top3, best_matching, logger = eval_trans.evaluation_transformer_272_single(val_loader, net, trans_encoder, tokenize_model, logger, evaluator, 4.0) +fid.append(best_fid) +div.append(best_div) +top1.append(best_top1) +top2.append(best_top2) +top3.append(best_top3) +matching.append(best_matching) + +logger.info('final result:') +logger.info(f'fid: {fid}') +logger.info(f'div: {div}') +logger.info(f'top1: {top1}') +logger.info(f'top2: {top2}') +logger.info(f'top3: {top3}') +logger.info(f'MM-dist (matching score) : {matching}') diff --git a/ga.txt b/ga.txt new file mode 100644 index 0000000000000000000000000000000000000000..9a85a8013a8a4c5dc150407985fcdb964e60b6e1 --- /dev/null +++ b/ga.txt @@ -0,0 +1,134 @@ +--extra-index-url https://download.pytorch.org/whl/cu121 +absl-py==0.13.0 +accelerate==1.0.1 +aiohappyeyeballs==2.4.3 +aiohttp==3.10.11 +aiosignal==1.3.1 +annotated-types==0.7.0 +antlr4-python3-runtime==4.9.3 +async-timeout==5.0.1 +attrs==24.2.0 +beautifulsoup4==4.12.3 +cachetools==4.2.2 +charset-normalizer==2.0.4 +chumpy==0.70 +click==8.1.7 +clip @ git+https://github.com/openai/CLIP.git@main#egg=clip +cloudpathlib==0.20.0 +cycler==0.10.0 +decorator==5.0.9 +diffusers==0.31.0 +einops==0.8.0 +ffmpeg-python==0.2.0 +filelock==3.16.1 +freetype-py==2.5.1 +frozenlist==1.5.0 
+fsspec==2024.2.0 +ftfy==6.1.1 +future==1.0.0 +fvcore==0.1.5.post20221221 +gdown==5.2.0 +glfw==2.8.0 +google-auth==2.36.0 +google-auth-oauthlib==0.4.6 +grpcio==1.68.0 +h5py==3.11.0 +huggingface-hub==0.26.2 +human-body-prior @ git+https://github.com/nghorbani/human_body_prior.git@master#egg=human-body-prior +idna==3.2 +imageio==2.9.0 +imageio-ffmpeg==0.5.1 +importlib-metadata==8.5.0 +iopath==0.1.10 +ipdb==0.13.9 +ipython==7.26.0 +ipython-genutils==0.2.0 +jedi==0.18.0 +jinja2==3.1.3 +joblib==1.0.1 +kiwisolver==1.3.1 +langcodes==3.4.1 +language-data==1.3.0 +lightning-utilities==0.11.9 +marisa-trie==1.2.1 +markdown==3.3.4 +markdown-it-py==3.0.0 +markupsafe==2.1.5 +matplotlib==3.4.3 +matplotlib-inline==0.1.2 +mdurl==0.1.2 +moviepy==0.2.3.1 +mpmath==1.3.0 +multidict==6.1.0 +natsort==8.4.0 +networkx==3.0 +oauthlib==3.1.1 +omegaconf==2.3.0 +orjson==3.10.15 +pandas==1.3.2 +parso==0.8.2 +pexpect==4.8.0 +portalocker==3.0.0 +prompt-toolkit==3.0.20 +propcache==0.2.0 +protobuf==5.28.3 +psutil==6.1.0 +pyasn1==0.4.8 +pyasn1-modules==0.2.8 +pydantic==2.10.1 +pydantic-core==2.27.1 +pydeprecate==0.3.2 +pygame==2.6.1 +pyglet==2.1.2 +pyopengl==3.1.0 +pyparsing==2.4.7 +pyrender==0.1.45 +pysocks==1.7.1 +python-dateutil==2.8.2 +pytorch-lightning==1.7.0 +pytorch3d==0.3.0 +pytz==2021.1 +pyyaml==5.4.1 +regex==2024.11.6 +requests==2.26.0 +requests-oauthlib==1.3.0 +rich==13.9.4 +rsa==4.7.2 +safetensors==0.4.5 +scikit-learn==0.24.2 +scipy==1.7.1 +sentence-transformers==3.2.1 +sentencepiece==0.2.0 +shapely==2.0.7 +shellingham==1.5.4 +smart-open==7.0.5 +smplx==0.1.28 +soupsieve==2.6 +sympy==1.13.1 +tabulate==0.9.0 +tensorboard==2.12.0 +tensorboard-data-server==0.7.2 +tensorboard-plugin-wit==1.8.0 +termcolor==2.4.0 +threadpoolctl==2.2.0 +timm==1.0.12 +tokenizers==0.20.3 +toml==0.10.2 +torch==2.4.1+cu121 +torchgeometry==0.1.2 +torchmetrics==0.7.0 +tqdm==4.62.2 +traitlets>=5.4.0,<=6 +transformers==4.46.3 +triangle==20250106 +trimesh==4.6.2 +triton==3.0.0 +typer==0.13.1 +typing-extensions==4.12.2 +urllib3==1.26.6 +wcwidth==0.2.5 +werkzeug==2.0.1 +wrapt==1.17.0 +yacs==0.1.8 +yarl==1.15.2 +zipp==3.20.2 \ No newline at end of file diff --git a/get_latent.py b/get_latent.py new file mode 100644 index 0000000000000000000000000000000000000000..8b5a5ffb41ba6c1cd6ba0196862898ea14270e94 --- /dev/null +++ b/get_latent.py @@ -0,0 +1,67 @@ +import os +import torch +import numpy as np + +from torch.utils.tensorboard import SummaryWriter +from os.path import join as pjoin +import json +import models.tae as tae +import options.option_tae as option_tae +import utils.utils_model as utils_model +from humanml3d_272 import dataset_tae_tokenizer +import warnings +from tqdm import tqdm +warnings.filterwarnings('ignore') + +##### ---- Exp dirs ---- ##### +args = option_tae.get_args_parser() +torch.manual_seed(args.seed) + +args.out_dir = os.path.join(args.out_dir, f'{args.exp_name}') +os.makedirs(args.out_dir, exist_ok = True) + +##### ---- Logger ---- ##### +logger = utils_model.get_logger(args.out_dir) +writer = SummaryWriter(args.out_dir) +logger.info(json.dumps(vars(args), indent=4, sort_keys=True)) + +##### ---- Dataloader ---- ##### +train_loader = dataset_tae_tokenizer.DATALoader(args.dataname) + +clip_range = [-30,20] + +net = tae.Causal_HumanTAE( + hidden_size=args.hidden_size, + down_t=args.down_t, + stride_t=args.stride_t, + depth=args.depth, + dilation_growth_rate=args.dilation_growth_rate, + activation='relu', + latent_dim=args.latent_dim, + clip_range=clip_range + ) + +logger.info('loading checkpoint from {}'.format(args.resume_pth)) 
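+# The checkpoint stores the autoencoder weights under the 'net' key. The frozen
+# encoder is used twice below: once on an all-zero 'impossible' pose to build a
+# reference end-of-motion latent, then once per clip to export training latents.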
+ckpt = torch.load(args.resume_pth, map_location='cpu') +net.load_state_dict(ckpt['net'], strict=True) +net.eval() +net.cuda() + + +##### ---- get reference end latent ---- ##### +reference_end_pose = torch.zeros(1, 4, 272).cuda() # impossible pose prior +reference_end_latent, _, _ = net.encode(reference_end_pose) +reference_end_latent = reference_end_latent.permute(1,0) +np.save(f'reference_end_latent_{args.dataname}.npy', reference_end_latent.cpu().detach().numpy()) + +os.makedirs(args.latent_dir, exist_ok = True) + +for batch in tqdm(train_loader): + pose, name = batch + bs, seq = pose.shape[0], pose.shape[1] + pose = pose.cuda().float() + latent, _, _ = net.encode(pose) + latent = latent.permute(1,0) + latent = torch.cat([latent, reference_end_latent], dim=0) + latent = latent.cpu().detach().numpy() + np.save(pjoin(args.latent_dir, name[0] +'.npy'), latent) diff --git a/humanml3d_272/.DS_Store b/humanml3d_272/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..276c9f1e1f5bf0aa907a371e03be28fe2ff521b6 Binary files /dev/null and b/humanml3d_272/.DS_Store differ diff --git a/humanml3d_272/dataset_TM_train.py b/humanml3d_272/dataset_TM_train.py new file mode 100644 index 0000000000000000000000000000000000000000..4c88f6b6ae2e92686c4e4239dcce6f54e41c5ff9 --- /dev/null +++ b/humanml3d_272/dataset_TM_train.py @@ -0,0 +1,141 @@ +import torch +from torch.utils import data +import numpy as np +from os.path import join as pjoin +import random +import codecs as cs +from tqdm import tqdm +import utils.paramUtil as paramUtil +from torch.utils.data._utils.collate import default_collate +import os + +def collate_fn(batch): + batch.sort(key=lambda x: x[2], reverse=True) + return default_collate(batch) + + +'''For use of training text-2-motion generative model''' +class Text2MotionDataset(data.Dataset): + def __init__(self, dataset_name, unit_length = 4, latent_dir=None): + + self.max_length = 64 + self.pointer = 0 + self.dataset_name = dataset_name + self.unit_length = unit_length + + if dataset_name == 't2m_272': + self.data_root = './humanml3d_272' + self.text_dir = pjoin(self.data_root, 'texts') + self.joints_num = 22 + fps = 30 + self.max_motion_length = 78 + dim_pose = 272 + split_file = pjoin(self.data_root, 'split', 'train.txt') + + else: + raise ValueError(f"Dataset {dataset_name} not supported") + + id_list = [] + with cs.open(split_file, 'r') as f: + for line in f.readlines(): + id_list.append(line.strip()) + + new_name_list = [] + data_dict = {} + for name in tqdm(id_list): + try: + m_token_list = np.load(pjoin(latent_dir, '%s.npy'%name)) + except: + continue + + # Read text + with cs.open(pjoin(self.text_dir, name + '.txt')) as f: + text_data = [] + flag = False + lines = f.readlines() + + for line in lines: + text_dict = {} + line_split = line.strip().split('#') + caption = line_split[0] + t_tokens = line_split[1].split(' ') + f_tag = float(line_split[2]) + to_tag = float(line_split[3]) + + f_tag = 0.0 if np.isnan(f_tag) else f_tag + to_tag = 0.0 if np.isnan(to_tag) else to_tag + + text_dict['caption'] = caption + text_dict['tokens'] = t_tokens + + if f_tag == 0.0 and to_tag == 0.0: + flag = True + text_data.append(text_dict) + else: + if int(f_tag*fps/unit_length) < int(to_tag*fps/unit_length): + m_token_list_new = [m_token_list[int(f_tag*fps/unit_length) : int(to_tag*fps/unit_length)]] + + if len(m_token_list_new) == 0: + continue + + new_name = '%s_%f_%f'%(name, f_tag, to_tag) + + data_dict[new_name] = {'m_token_list': m_token_list_new, + 'text':[text_dict]} + 
new_name_list.append(new_name) + + if flag: + data_dict[name] = {'m_token_list': m_token_list, + 'text':text_data} + new_name_list.append(name) + + self.data_dict = data_dict + self.name_list = new_name_list + + def __len__(self): + return len(self.data_dict) + + def __getitem__(self, item): + data = self.data_dict[self.name_list[item]] + m_token_list, text_list = data['m_token_list'], data['text'] + m_tokens = np.array(m_token_list) + + text_data = random.choice(text_list) + caption= text_data['caption'] + + if len(m_tokens.shape) == 3: + m_tokens = m_tokens.squeeze(0) + coin = np.random.choice([False, False, True]) + if coin: + coin2 = np.random.choice([True, False]) + if coin2: + m_tokens = m_tokens[:-1] + else: + m_tokens = m_tokens[1:] + m_tokens_len = m_tokens.shape[0] + + if m_tokens_len < self.max_motion_length: + m_tokens = np.concatenate([m_tokens, np.zeros((self.max_motion_length-m_tokens_len, m_tokens.shape[1]), dtype=int)], axis=0) + return caption, m_tokens, m_tokens_len + + + + +def DATALoader(dataset_name, + batch_size, latent_dir, unit_length=4, + num_workers = 8) : + + train_loader = torch.utils.data.DataLoader(Text2MotionDataset(dataset_name, latent_dir = latent_dir, unit_length=unit_length), + batch_size, + shuffle=True, + num_workers=num_workers, + drop_last = True) + + return train_loader + + +def cycle(iterable): + while True: + for x in iterable: + yield x + diff --git a/humanml3d_272/dataset_TM_train_motionstreamer.py b/humanml3d_272/dataset_TM_train_motionstreamer.py new file mode 100644 index 0000000000000000000000000000000000000000..63bf22ef621e41a5e441f1ba213012ed7c37f6a9 --- /dev/null +++ b/humanml3d_272/dataset_TM_train_motionstreamer.py @@ -0,0 +1,175 @@ +import torch +from torch.utils import data +import numpy as np +from os.path import join as pjoin +import random +import codecs as cs +from tqdm import tqdm +from torch.utils.data._utils.collate import default_collate +import os + +def collate_fn(batch): + batch.sort(key=lambda x: x[3], reverse=True) + return default_collate(batch) + + +class Text2MotionDataset(data.Dataset): + def __init__(self, dataset_name, unit_length = 4, latent_dir=None): + + self.max_length = 64 + self.pointer = 0 + self.dataset_name = dataset_name + self.unit_length = unit_length + + if dataset_name == 't2m_babel_272': + # Babel-272-stream data dir + self.babel_stream_data_root = './babel_272_stream' + self.babel_stream_text_dir = pjoin(self.babel_stream_data_root, 'train_stream_text') + fps = 30 + self.max_motion_length = 78 + + # HumanML3D-272 data dir + self.hml_data_root = './humanml3d_272' + self.hml_text_dir = pjoin(self.hml_data_root, 'texts') + + else: + raise ValueError(f'Invalid dataset name: {dataset_name}') + + id_list = [] + + for file in os.listdir(latent_dir): + if file.endswith('.npy'): + id_list.append(file[:-4]) + + new_name_list = [] + data_dict = {} + for name in tqdm(id_list): + m_token_list = np.load(pjoin(latent_dir, '%s.npy'%name)) + + if len(m_token_list) > self.max_motion_length: + continue + + # Read text + if name.split('_')[0] == 'seq': + # Babel-272-stream + with cs.open(pjoin(self.babel_stream_text_dir, name + '.txt')) as f: + text_data = [] + flag = False + lines = f.readlines() + + for line in lines: + text_dict = {} + B_split = line.strip().split('*')[1].split('#') + B_text = line.strip().split('*')[1].split('#')[0] + if B_text == '': + continue + B_t_tokens = B_split[1].split(' ') + A_motion_length = B_split[-1] + A_token_length = int(A_motion_length) // unit_length + text_dict['caption'] = B_text 
+ text_dict['tokens'] = B_t_tokens + + flag = True + text_data.append(text_dict) + + else: + # HumanML3D-272 + with cs.open(pjoin(self.hml_text_dir, name + '.txt')) as f: + text_data = [] + flag = False + lines = f.readlines() + + for line in lines: + text_dict = {} + + line_split = line.strip().split('#') + caption = line_split[0] + + t_tokens = line_split[1].split(' ') + f_tag = float(line_split[2]) + to_tag = float(line_split[3]) + + A_token_length = 0 + + f_tag = 0.0 if np.isnan(f_tag) else f_tag + to_tag = 0.0 if np.isnan(to_tag) else to_tag + text_dict['caption'] = caption + text_dict['tokens'] = t_tokens + + if f_tag == 0.0 and to_tag == 0.0: + flag = True + text_data.append(text_dict) + else: + if int(f_tag*fps/unit_length) < int(to_tag*fps/unit_length): + m_token_list_new = [m_token_list[int(f_tag*fps/unit_length) : int(to_tag*fps/unit_length)]] + + if len(m_token_list_new) == 0: + continue + new_name = '%s_%f_%f'%(name, f_tag, to_tag) + + data_dict[new_name] = {'m_token_list': m_token_list_new, + 'text':[text_dict], + 'A_token_length': A_token_length + } + new_name_list.append(new_name) + + if flag: + + data_dict[name] = {'m_token_list': m_token_list, + 'text':text_data, + 'A_token_length': A_token_length + } + new_name_list.append(name) + + self.data_dict = data_dict + self.name_list = new_name_list + + def __len__(self): + return len(self.data_dict) + + def __getitem__(self, item): + data = self.data_dict[self.name_list[item]] + m_token_list, text_list = data['m_token_list'], data['text'] + m_tokens = np.array(m_token_list) + + + text_data = random.choice(text_list) + caption= text_data['caption'] + + if len(m_tokens.shape) == 3: + m_tokens = m_tokens.squeeze(0) + + + A_token_length = data['A_token_length'] + m_tokens_len = m_tokens.shape[0] + + + if m_tokens_len < self.max_motion_length: + m_tokens = np.concatenate([m_tokens, np.zeros((self.max_motion_length - m_tokens_len, m_tokens.shape[1]), dtype=int)], axis=0) + + + return caption, m_tokens, m_tokens_len, A_token_length + + + + +def DATALoader(dataset_name, + batch_size, unit_length=4, + num_workers = 8, latent_dir = None) : + + train_loader = torch.utils.data.DataLoader(Text2MotionDataset(dataset_name, unit_length=unit_length, latent_dir=latent_dir), + batch_size, + shuffle=True, + num_workers=num_workers, + #collate_fn=collate_fn, + drop_last = True) + + + return train_loader + + +def cycle(iterable): + while True: + for x in iterable: + yield x + diff --git a/humanml3d_272/dataset_eval_tae.py b/humanml3d_272/dataset_eval_tae.py new file mode 100644 index 0000000000000000000000000000000000000000..966682650c0b81ed54544e6c70bbcea4e0cff36f --- /dev/null +++ b/humanml3d_272/dataset_eval_tae.py @@ -0,0 +1,179 @@ +import torch +from torch.utils import data +import numpy as np +from os.path import join as pjoin +import random +import codecs as cs +from tqdm import tqdm +import os +import utils.paramUtil as paramUtil +from torch.utils.data._utils.collate import default_collate + + +def collate_fn(batch): + batch.sort(key=lambda x: x[1], reverse=True) + return default_collate(batch) + + +class Text2MotionDataset(data.Dataset): + def __init__(self, dataset_name, is_test, max_text_len = 20, unit_length = 4): + + self.max_length = 20 + self.pointer = 0 + self.dataset_name = dataset_name + self.is_test = is_test + self.max_text_len = max_text_len + self.unit_length = unit_length + + + if dataset_name == 't2m_272': + self.data_root = './humanml3d_272' + self.motion_dir = pjoin(self.data_root, 'motion_data') + self.text_dir = 
pjoin(self.data_root, 'texts') + self.joints_num = 22 + self.max_motion_length = 300 + fps = 30 + self.meta_dir = './humanml3d_272/mean_std' + if is_test: + split_file = pjoin(self.data_root, 'split', 'test.txt') + else: + split_file = pjoin(self.data_root, 'split', 'val.txt') + elif dataset_name == 't2m_babel_272': + # HumanML3D-272 data + self.hml_data_root = './humanml3d_272' + self.hml_motion_dir = pjoin(self.hml_data_root, 'motion_data') + if is_test: + hml_split_file = pjoin(self.hml_data_root, 'split', 'test.txt') + else: + hml_split_file = pjoin(self.hml_data_root, 'split', 'val.txt') + + self.joints_num = 22 + self.max_motion_length = 300 + fps = 30 + + # Babel-272 data + self.babel_data_root = './babel_272' + self.babel_motion_dir = pjoin(self.babel_data_root, 'motion_data') + babel_split_file = pjoin(self.babel_data_root, 'split', 'val.txt') + self.meta_dir = pjoin(self.babel_data_root, 't2m_babel_mean_std') + + mean = np.load(pjoin(self.meta_dir, 'Mean.npy')) + std = np.load(pjoin(self.meta_dir, 'Std.npy')) + + min_motion_len = 60 # 30 fps + + data_dict = {} + id_list = [] + + if dataset_name == 't2m_272': + with cs.open(split_file, 'r') as f: + for line in f.readlines(): + id_list.append(line.strip()) + + elif dataset_name == 't2m_babel_272': + with cs.open(hml_split_file, 'r') as f: + for line in f.readlines(): + id_list.append(line.strip()) + with cs.open(babel_split_file, 'r') as f: + for line in f.readlines(): + id_list.append('b_' + line.strip()) + + new_name_list = [] + length_list = [] + + + for name in tqdm(id_list): + try: + if dataset_name == 't2m_272': + motion = np.load(pjoin(self.motion_dir, name + '.npy')) + elif dataset_name == 't2m_babel_272': + if name.split('_')[0] == 'b': + motion = np.load(pjoin(self.babel_motion_dir, name.split('_')[1] + '.npy')) + else: + motion = np.load(pjoin(self.hml_motion_dir, name + '.npy')) + + if (len(motion)) < min_motion_len or (len(motion) >= self.max_motion_length): + continue + + data_dict[name] = {'motion': motion, + 'length': len(motion) + } + new_name_list.append(name) + length_list.append(len(motion)) + except: + pass + + + name_list, length_list = zip(*sorted(zip(new_name_list, length_list), key=lambda x: x[1])) + self.mean = mean + self.std = std + self.length_arr = np.array(length_list) + self.data_dict = data_dict + self.name_list = name_list + self.reset_max_len(self.max_length) + + def reset_max_len(self, length): + assert length <= self.max_motion_length + self.pointer = np.searchsorted(self.length_arr, length) + print("Pointer Pointing at %d"%self.pointer) + self.max_length = length + + def inv_transform(self, data): + return data * self.std + self.mean + + def forward_transform(self, data): + return (data - self.mean) / self.std + + def __len__(self): + return len(self.data_dict) - self.pointer + + def __getitem__(self, item): + idx = self.pointer + item + name = self.name_list[idx] + data = self.data_dict[name] + motion, m_length = data['motion'], data['length'] + + + if self.unit_length < 10: + coin2 = np.random.choice(['single', 'single', 'double']) + else: + coin2 = 'single' + + if coin2 == 'double': + m_length = (m_length // self.unit_length - 1) * self.unit_length + elif coin2 == 'single': + m_length = (m_length // self.unit_length) * self.unit_length + + idx = random.randint(0, len(motion) - m_length) + motion = motion[idx:idx+m_length] + + #"Motion Normalization" + motion = (motion - self.mean) / self.std + + if m_length < self.max_motion_length: + motion = np.concatenate([motion, + 
np.zeros((self.max_motion_length - m_length, motion.shape[1])) + ], axis=0) + + return motion, m_length + + + + +def DATALoader(dataset_name, is_test, + batch_size, + num_workers = 64, unit_length = 4, drop_last=True) : + + val_loader = torch.utils.data.DataLoader(Text2MotionDataset(dataset_name, is_test, unit_length=unit_length), + batch_size, + shuffle = True, + num_workers=num_workers, + collate_fn=collate_fn, + drop_last = drop_last) + return val_loader + + +def cycle(iterable): + while True: + for x in iterable: + yield x \ No newline at end of file diff --git a/humanml3d_272/dataset_tae.py b/humanml3d_272/dataset_tae.py new file mode 100644 index 0000000000000000000000000000000000000000..eaa5222196121089f2196f1fd063ca26f4bd131e --- /dev/null +++ b/humanml3d_272/dataset_tae.py @@ -0,0 +1,132 @@ +import torch +from torch.utils import data +import numpy as np +from os.path import join as pjoin +import random +import codecs as cs +from tqdm import tqdm + + + +class MotionDataset(data.Dataset): + def __init__(self, dataset_name, window_size = 64, unit_length = 4): + self.window_size = window_size + self.unit_length = unit_length + self.dataset_name = dataset_name + + if dataset_name == 't2m_272': + self.data_root = './humanml3d_272' + self.motion_dir = pjoin(self.data_root, 'motion_data') + self.text_dir = pjoin(self.data_root, 'texts') + self.joints_num = 22 + self.max_motion_length = 300 + self.meta_dir = pjoin(self.data_root, 'mean_std') + split_file = pjoin(self.data_root, 'split', 'train.txt') + + elif dataset_name == 't2m_babel_272': + self.hml_data_root = './humanml3d_272' + self.hml_motion_dir = pjoin(self.hml_data_root, 'motion_data') + hml_split_file = pjoin(self.hml_data_root, 'split', 'train.txt') + self.joints_num = 22 + self.max_motion_length = 300 + + self.babel_data_root = './babel_272' + self.babel_motion_dir = pjoin(self.babel_data_root, 'motion_data') + babel_split_file = pjoin(self.babel_data_root, 'split', 'train.txt') + self.meta_dir = pjoin(self.babel_data_root, 't2m_babel_mean_std') + else: + raise ValueError(f'Dataset {dataset_name} not found') + + mean = np.load(pjoin(self.meta_dir, 'Mean.npy')) + std = np.load(pjoin(self.meta_dir, 'Std.npy')) + + self.data = [] + self.lengths = [] + id_list = [] + + if dataset_name == 't2m_272': + with cs.open(split_file, 'r') as f: + for line in f.readlines(): + id_list.append(line.strip()) + + elif dataset_name == 't2m_babel_272': + with cs.open(hml_split_file, 'r') as f: + for line in f.readlines(): + id_list.append(line.strip()) + with cs.open(babel_split_file, 'r') as f: + for line in f.readlines(): + id_list.append('b_' + line.strip()) + + for name in tqdm(id_list): + try: + if dataset_name == 't2m_272': + motion = np.load(pjoin(self.motion_dir, name + '.npy')) + elif dataset_name == 't2m_babel_272': + if name.split('_')[0] == 'b': + # Babel-272 + motion = np.load(pjoin(self.babel_motion_dir, name.split('_')[1] + '.npy')) + else: + # HumanML3D-272 + motion = np.load(pjoin(self.hml_motion_dir, name + '.npy')) + else: + raise ValueError(f'Dataset {dataset_name} not found') + + if motion.shape[0] < self.window_size: + continue + self.lengths.append(motion.shape[0] - self.window_size) + self.data.append(motion) + except: + pass + + print(f'Training on {len(self.data)} motion sequences...') + + self.mean = mean + self.std = std + + + def inv_transform(self, data): + return data * self.std + self.mean + + def compute_sampling_prob(self) : + + prob = np.array(self.lengths, dtype=np.float32) + prob /= np.sum(prob) + return prob + 
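+    # Each clip's weight is proportional to (num_frames - window_size), i.e. the
+    # number of distinct training windows it can supply; DATALoader below builds a
+    # WeightedRandomSampler from these weights (though the shipped DataLoader call
+    # uses shuffle=True and leaves the sampler argument commented out).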
+ def __len__(self): + return len(self.data) + + def __getitem__(self, item): + motion = self.data[item] + + idx = random.randint(0, len(motion) - self.window_size) + + motion = motion[idx:idx+self.window_size] + # Motion Normalization + motion = (motion - self.mean) / self.std + + return motion + +def DATALoader(dataset_name, + batch_size, + num_workers = 64, + window_size = 64, + unit_length = 4): + + trainSet = MotionDataset(dataset_name, window_size=window_size, unit_length=unit_length) + prob = trainSet.compute_sampling_prob() + sampler = torch.utils.data.WeightedRandomSampler(prob, num_samples = len(trainSet) * 1000, replacement=True) + train_loader = torch.utils.data.DataLoader(trainSet, + batch_size, + shuffle=True, + #sampler=sampler, + num_workers=num_workers, + #collate_fn=collate_fn, + drop_last = True) + + return train_loader + +def cycle(iterable): + while True: + for x in iterable: + yield x \ No newline at end of file diff --git a/humanml3d_272/dataset_tae_tokenizer.py b/humanml3d_272/dataset_tae_tokenizer.py new file mode 100644 index 0000000000000000000000000000000000000000..628760ed6583c64b9a734c918b5440911c7f87f8 --- /dev/null +++ b/humanml3d_272/dataset_tae_tokenizer.py @@ -0,0 +1,132 @@ +import torch +from torch.utils import data +import numpy as np +from os.path import join as pjoin +import random +import codecs as cs +from tqdm import tqdm +import os + + +class MotionDataset(data.Dataset): + def __init__(self, dataset_name, feat_bias = 5, window_size = 64, unit_length = 4): + self.window_size = window_size + self.unit_length = unit_length + self.feat_bias = feat_bias + + self.dataset_name = dataset_name + min_motion_len = 40 + + + if dataset_name == 't2m_272': + self.data_root = './humanml3d_272' + self.motion_dir = pjoin(self.data_root, 'motion_data') + self.meta_dir = pjoin(self.data_root, 'mean_std') + split_file = pjoin(self.data_root, 'split', 'train.txt') + + elif dataset_name == 't2m_babel_272': + # HumanML3D-272 data dir + self.hml_data_root = './humanml3d_272' + self.hml_motion_dir = pjoin(self.hml_data_root, 'motion_data') + hml_split_file = pjoin(self.hml_data_root, 'split', 'train.txt') + + # Babel-272-stream data dir + self.babel_stream_data_root = './babel_272_stream' + self.babel_stream_motion_dir = pjoin(self.babel_stream_data_root, 'train_stream') + self.meta_dir = './babel_272/t2m_babel_mean_std' + + else: + raise ValueError(f"Invalid dataset name: {dataset_name}") + + + mean = np.load(pjoin(self.meta_dir, 'Mean.npy')) + std = np.load(pjoin(self.meta_dir, 'Std.npy')) + + data_dict = {} + id_list = [] + + if dataset_name == 't2m_272': + with cs.open(split_file, 'r') as f: + for line in f.readlines(): + id_list.append(line.strip()) + elif dataset_name == 't2m_babel_272': + # HumanML3D-272 data + with cs.open(hml_split_file, 'r') as f: + for line in f.readlines(): + id_list.append(line.strip()) + + # Babel-272-stream data + for file in os.listdir(self.babel_stream_motion_dir): + if file.endswith('.npy'): + id_list.append(file[:-4]) # seq_1, seq_2, ... + + new_name_list = [] + length_list = [] + for name in tqdm(id_list): + try: + if dataset_name == 't2m_272': + motion = np.load(pjoin(self.motion_dir, name + '.npy')) + if (len(motion)) < min_motion_len: + continue + elif dataset_name == 't2m_babel_272': + if name.split('_')[0] == 'seq': + # seq_1, seq_2, ... 
(Babel-272-stream) + motion = np.load(pjoin(self.babel_stream_motion_dir, name + '.npy')) + else: + # (HumanML3D-272) + motion = np.load(pjoin(self.hml_motion_dir, name + '.npy')) + if (len(motion)) < min_motion_len: + continue + + data_dict[name] = {'motion': motion, + 'length': len(motion), + 'name': name} + new_name_list.append(name) + length_list.append(len(motion)) + except: + pass + + + self.mean = mean + self.std = std + self.length_arr = np.array(length_list) + self.data_dict = data_dict + self.name_list = new_name_list + + def inv_transform(self, data): + return data * self.std + self.mean + + def __len__(self): + return len(self.data_dict) + + def __getitem__(self, item): + name = self.name_list[item] + data = self.data_dict[name] + motion, m_length = data['motion'], data['length'] + + m_length = (m_length // self.unit_length) * self.unit_length + + idx = random.randint(0, len(motion) - m_length) + motion = motion[idx:idx+m_length] + + # "Z Normalization" + motion = (motion - self.mean) / self.std + + return motion, name + +def DATALoader(dataset_name, + batch_size = 1, + num_workers = 8, unit_length = 4) : + + train_loader = torch.utils.data.DataLoader(MotionDataset(dataset_name, unit_length=unit_length), + batch_size, + shuffle=True, + num_workers=num_workers, + drop_last = True) + + return train_loader + +def cycle(iterable): + while True: + for x in iterable: + yield x \ No newline at end of file diff --git a/humanml3d_272/mean_std/Mean.npy b/humanml3d_272/mean_std/Mean.npy new file mode 100644 index 0000000000000000000000000000000000000000..83b1e4c10c7db0b87e227bc4aeed438c47d62478 --- /dev/null +++ b/humanml3d_272/mean_std/Mean.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1200c165ac97501676de3a7f4e870ec4de23f579d6b1d63227d08a5c80ad1272 +size 2304 diff --git a/humanml3d_272/mean_std/Std.npy b/humanml3d_272/mean_std/Std.npy new file mode 100644 index 0000000000000000000000000000000000000000..a8e27387c2bfd45599c6d3cd28e2854bcd78bee1 --- /dev/null +++ b/humanml3d_272/mean_std/Std.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb699c61755042e3b3c20e64723edf39f4b374cfd71f860efe9a4515f105d99c +size 2304 diff --git a/humanml3d_272/motion_data.zip b/humanml3d_272/motion_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..9d30d8ec2400ce6601d6db9c78bb69554f04db29 --- /dev/null +++ b/humanml3d_272/motion_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:39d92b459b83159c1e30bdca319f60389221b68301a1c3f1f70dbe3b3def05e5 +size 8355859664 diff --git a/humanml3d_272/prepare/download_Causal_TAE_t2m_272_ckpt.py b/humanml3d_272/prepare/download_Causal_TAE_t2m_272_ckpt.py new file mode 100644 index 0000000000000000000000000000000000000000..bdf21ce46dd00b2ca46a953dc783cfa7a60592b5 --- /dev/null +++ b/humanml3d_272/prepare/download_Causal_TAE_t2m_272_ckpt.py @@ -0,0 +1,14 @@ +import os +os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com' + +from huggingface_hub import snapshot_download + +snapshot_download( + repo_id="lxxiao/MotionStreamer", + repo_type="model", + local_dir="./", + allow_patterns=["Causal_TAE/*"], + local_dir_use_symlinks=False, + resume_download=True, + max_workers=8 +) \ No newline at end of file diff --git a/humanml3d_272/prepare/download_Causal_TAE_t2m_babel_272_ckpt.py b/humanml3d_272/prepare/download_Causal_TAE_t2m_babel_272_ckpt.py new file mode 100644 index 0000000000000000000000000000000000000000..bfc101f0e11e3852cb76ce9fa1d4053941a3b9eb --- /dev/null +++ 
b/humanml3d_272/prepare/download_Causal_TAE_t2m_babel_272_ckpt.py @@ -0,0 +1,14 @@ +import os +os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com' + +from huggingface_hub import snapshot_download + +snapshot_download( + repo_id="lxxiao/MotionStreamer", + repo_type="model", + local_dir="./", + allow_patterns=["Causal_TAE_t2m_babel/*"], + local_dir_use_symlinks=False, + resume_download=True, + max_workers=8 +) \ No newline at end of file diff --git a/humanml3d_272/prepare/download_evaluator_ckpt.py b/humanml3d_272/prepare/download_evaluator_ckpt.py new file mode 100644 index 0000000000000000000000000000000000000000..5c21478102886f4564eef74a3578fb851cae7c00 --- /dev/null +++ b/humanml3d_272/prepare/download_evaluator_ckpt.py @@ -0,0 +1,14 @@ +import os +os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com' + +from huggingface_hub import snapshot_download + +snapshot_download( + repo_id="lxxiao/MotionStreamer", + repo_type="model", + local_dir="./", + allow_patterns=["Evaluator_272/*"], + local_dir_use_symlinks=False, + resume_download=True, + max_workers=8 +) \ No newline at end of file diff --git a/humanml3d_272/texts.zip b/humanml3d_272/texts.zip new file mode 100644 index 0000000000000000000000000000000000000000..3e255da87f5c88d1c8b09a6a2ff7f2347ba0c0c7 --- /dev/null +++ b/humanml3d_272/texts.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f571593d39c305ed7df5836f052253159242a8e215f12cbf32b61fc9dd29b827 +size 12180150 diff --git a/inference.py b/inference.py new file mode 100644 index 0000000000000000000000000000000000000000..11be7459c7c985fd9fb0c11840beae4b7c877fae --- /dev/null +++ b/inference.py @@ -0,0 +1,170 @@ +import torch +import numpy as np +import os +import time +import subprocess +import sys +import smplx + +# --- Model Imports --- +from models.llama_model import LLaMAHF, LLaMAHFConfig +from models.tae import Causal_HumanTAE +from sentence_transformers import SentenceTransformer + +# --- Direct Imports from Cloned Repo's `utils` folder --- +from utils import bvh, quat +from utils.face_z_align_util import rotation_6d_to_matrix, matrix_to_axis_angle, axis_angle_to_quaternion + +# --- A simple logging helper --- +def log_step(message): + timestamp = time.strftime("%Y-%m-%d %H:%M:%S") + print(f"[{timestamp}] - {message}") + +# --- Self-Contained Conversion Function with Detailed Logging --- +def convert_to_bvh(motion_data_272, output_path="outputs/final_motion.bvh", fps=60): + log_step("--- Starting Conversion to BVH Format ---") + try: + # --- 1. Initial Data Preparation --- + njoint = 22 + motion_data_272 = motion_data_272.squeeze(0) + nfrm, _ = motion_data_272.shape + log_step(f"Input motion has {nfrm} frames and {motion_data_272.shape[1]} dimensions.") + + # --- 2. Extract Data Components from 272-dim Vector --- + log_step("Extracting rotation, velocity, and position data...") + rotations_6d = torch.from_numpy(motion_data_272[:, 8+6*njoint : 8+12*njoint]).reshape(nfrm, -1, 6) + rotations_matrix = rotation_6d_to_matrix(rotations_6d).numpy() + + global_heading_diff_rot_6d = torch.from_numpy(motion_data_272[:, 2:8]) + global_heading_diff_rot = rotation_6d_to_matrix(global_heading_diff_rot_6d).numpy() + + velocities_root_xy = motion_data_272[:, :2] + positions_no_heading = motion_data_272[:, 8 : 8+3*njoint].reshape(nfrm, -1, 3) + height = positions_no_heading[:, 0, 1] + log_step(f"Extracted rotations matrix with shape: {rotations_matrix.shape}") + + # --- 3. 
Reconstruct Global Heading and Translation --- + log_step("Reconstructing global heading...") + global_heading_rot = [global_heading_diff_rot[0]] + for R_rel in global_heading_diff_rot[1:]: + global_heading_rot.append(np.matmul(R_rel, global_heading_rot[-1])) + global_heading_rot = np.array(global_heading_rot) + inv_global_heading_rot = np.transpose(global_heading_rot, (0, 2, 1)) + rotations_matrix[:, 0, ...] = np.matmul(inv_global_heading_rot, rotations_matrix[:, 0, ...]) + + log_step("Reconstructing root translation...") + velocities_root_xyz = np.zeros((nfrm, 3)) + velocities_root_xyz[:, 0] = velocities_root_xy[:, 0] + velocities_root_xyz[:, 2] = velocities_root_xy[:, 1] + velocities_root_xyz[1:, :] = np.matmul(inv_global_heading_rot[:-1], velocities_root_xyz[1:, :, None]).squeeze(-1) + root_translation = np.cumsum(velocities_root_xyz, axis=0) + root_translation[:, 1] = height + log_step(f"Reconstructed root translation with shape: {root_translation.shape}") + + # --- 4. Convert to Final SMPL Pose Format --- + log_step("Converting rotation matrices to axis-angle format...") + axis_angle = matrix_to_axis_angle(torch.from_numpy(rotations_matrix)).numpy().reshape(nfrm, -1) + + num_frames = axis_angle.shape[0] + poses_24_joints = np.zeros((num_frames, 72)) + poses_24_joints[:, :66] = axis_angle + log_step(f"Padded pose data to 24 joints for SMPL standard, new shape: {poses_24_joints.shape}") + + # --- 5. Create and Save BVH File --- + log_step("Loading SMPL model to create BVH skeleton...") + model = smplx.create(model_path="body_models/human_model_files", model_type="smpl", gender="NEUTRAL") + parents = model.parents.detach().cpu().numpy() + + rest_pose = model().joints.detach().cpu().numpy().squeeze()[:24,:] + offsets = rest_pose - rest_pose[parents] + offsets[0] = rest_pose[0] + + log_step("Converting axis-angle to euler angles for BVH...") + rotations_quat = axis_angle_to_quaternion(torch.from_numpy(poses_24_joints.reshape(-1, 24, 3))).numpy() + rotations_euler = np.degrees(quat.to_euler(rotations_quat, order="zyx")) + + positions = np.zeros_like(rotations_quat[..., :3]) + positions[:, 0] = root_translation + + log_step("Assembling final BVH data structure...") + # <<<<<<<<<<<<<<<<<<<<<<<< THE FIX IS HERE >>>>>>>>>>>>>>>>>>>>>>>> + # Use the hardcoded list of joint names from the official conversion script. + joint_names = [ + "Pelvis", "Left_hip", "Right_hip", "Spine1", "Left_knee", "Right_knee", + "Spine2", "Left_ankle", "Right_ankle", "Spine3", "Left_foot", "Right_foot", + "Neck", "Left_collar", "Right_collar", "Head", "Left_shoulder", + "Right_shoulder", "Left_elbow", "Right_elbow", "Left_wrist", "Right_wrist", + "Left_palm", "Right_palm", + ] + + bvh_data = { + "rotations": rotations_euler, + "positions": offsets + positions, + "offsets": offsets, + "parents": parents, + "names": joint_names, # Use the correct, hardcoded list + "order": "zyx", + "frametime": 1.0 / fps, + } + + log_step(f"Saving BVH file to {output_path}...") + bvh.save(output_path, bvh_data) + log_step(f"✅ BVH file saved successfully to {output_path}") + + except Exception as e: + log_step(f"❌ BVH Conversion Failed. 
Error: {e}") + import traceback + traceback.print_exc() + + +def main(): + device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + log_step(f"Using device: {device}") + + text_prompt = "a person walks forward" + causal_tae_checkpoint = './Causal_TAE/net_last.pth' + output_dir = "outputs" + os.makedirs(output_dir, exist_ok=True) + + log_step("Loading Causal Temporal Autoencoder (TAE)...") + causal_tae = Causal_HumanTAE( + latent_dim=16, down_t=2, depth=3, stride_t=2, clip_range=[-30.0, 20.0] + ).to(device) + state_dict = torch.load(causal_tae_checkpoint, map_location=device, weights_only=True)['net'] + causal_tae.load_state_dict(state_dict, strict=True) + causal_tae.eval() + log_step("✅ TAE loaded successfully.") + + log_step("Loading Text Encoder (T5-XXL)...") + text_encoder = SentenceTransformer('sentence-transformers/sentence-t5-xxl', device=device) + log_step("✅ Text Encoder loaded successfully.") + + log_step("Loading MotionStreamer model architecture...") + config = LLaMAHFConfig.from_name("Normal_size") + motion_streamer = LLaMAHF(config).to(device) + motion_streamer.eval() + log_step("✅ MotionStreamer loaded successfully.") + + log_step(f"Starting motion generation for text: '{text_prompt}'") + with torch.no_grad(): + impossible_pose = torch.zeros(1, 4, 272, device=device) + reference_end_latent, _, _ = causal_tae.encode(impossible_pose) + reference_end_token = reference_end_latent.detach() + + log_step("Autoregressive generation started...") + motion_latents = motion_streamer.sample_for_eval_CFG_inference( + clip_text=[text_prompt], clip_model=text_encoder, tokenizer='t5-xxl', + device=device, reference_end_token=reference_end_token, + cfg=4.5, threshold=3.0, temperature=1.0, length=312 + ) + log_step("✅ Autoregressive generation finished.") + + log_step("Decoding latents into 272-dim motion data...") + with torch.no_grad(): + generated_motion_272 = causal_tae.forward_decoder(motion_latents) + log_step(f"272-dim motion data shape: {generated_motion_272.shape}") + + convert_to_bvh(generated_motion_272.cpu().numpy(), output_path=os.path.join(output_dir, "final_motion.bvh")) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/models/__pycache__/causal_cnn.cpython-310.pyc b/models/__pycache__/causal_cnn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..739dee17a5c2202edc383decd5ccaeae701638e3 Binary files /dev/null and b/models/__pycache__/causal_cnn.cpython-310.pyc differ diff --git a/models/__pycache__/causal_cnn.cpython-312.pyc b/models/__pycache__/causal_cnn.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec7290828769301c6302dbaf425da0ab9bec713b Binary files /dev/null and b/models/__pycache__/causal_cnn.cpython-312.pyc differ diff --git a/models/__pycache__/causal_cnn.cpython-38.pyc b/models/__pycache__/causal_cnn.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e64cd9d39f35e4b034bbe98343749d8f28d3c5e Binary files /dev/null and b/models/__pycache__/causal_cnn.cpython-38.pyc differ diff --git a/models/__pycache__/causal_cnn.cpython-39.pyc b/models/__pycache__/causal_cnn.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4742f3877507d45c7418721bcb40628bedcd7b3 Binary files /dev/null and b/models/__pycache__/causal_cnn.cpython-39.pyc differ diff --git a/models/__pycache__/diffloss.cpython-310.pyc b/models/__pycache__/diffloss.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..93aa9439f152f68df1f429831b3245ec13ac5510 Binary files /dev/null and b/models/__pycache__/diffloss.cpython-310.pyc differ diff --git a/models/__pycache__/diffloss.cpython-312.pyc b/models/__pycache__/diffloss.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..868457d75cd01ff8481a38485df7e77e04529304 Binary files /dev/null and b/models/__pycache__/diffloss.cpython-312.pyc differ diff --git a/models/__pycache__/diffloss.cpython-38.pyc b/models/__pycache__/diffloss.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b818816b1df01466b1ec4070cdfc9adb301f1e71 Binary files /dev/null and b/models/__pycache__/diffloss.cpython-38.pyc differ diff --git a/models/__pycache__/diffloss.cpython-39.pyc b/models/__pycache__/diffloss.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa61f1aa72bf75a3889253b051de7bd6f8c9f549 Binary files /dev/null and b/models/__pycache__/diffloss.cpython-39.pyc differ diff --git a/models/__pycache__/llama_model.cpython-310.pyc b/models/__pycache__/llama_model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5df85f7fb9eda96625492278aef0ae9885bf0ff9 Binary files /dev/null and b/models/__pycache__/llama_model.cpython-310.pyc differ diff --git a/models/__pycache__/llama_model.cpython-312.pyc b/models/__pycache__/llama_model.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6bef2da5f643ac350b4694ebfed7ac6210828a4d Binary files /dev/null and b/models/__pycache__/llama_model.cpython-312.pyc differ diff --git a/models/__pycache__/llama_model.cpython-38.pyc b/models/__pycache__/llama_model.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0374c41be57641060b59aa415f4b931421d803a2 Binary files /dev/null and b/models/__pycache__/llama_model.cpython-38.pyc differ diff --git a/models/__pycache__/llama_model.cpython-39.pyc b/models/__pycache__/llama_model.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5fddbe5684c289d2542892f49164de37eb66ac3 Binary files /dev/null and b/models/__pycache__/llama_model.cpython-39.pyc differ diff --git a/models/__pycache__/resnet.cpython-310.pyc b/models/__pycache__/resnet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b69c9d6d00e8881b1f9df57b5c8f13ed30872d3 Binary files /dev/null and b/models/__pycache__/resnet.cpython-310.pyc differ diff --git a/models/__pycache__/resnet.cpython-312.pyc b/models/__pycache__/resnet.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dab288e65bf3983b1e3b60cfbd453ffec1a847c8 Binary files /dev/null and b/models/__pycache__/resnet.cpython-312.pyc differ diff --git a/models/__pycache__/resnet.cpython-38.pyc b/models/__pycache__/resnet.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7fde339e4d33f3d5e145c95c4b752773ea1e1398 Binary files /dev/null and b/models/__pycache__/resnet.cpython-38.pyc differ diff --git a/models/__pycache__/resnet.cpython-39.pyc b/models/__pycache__/resnet.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..754bf514c62a68530f064ac327526882d822ab6f Binary files /dev/null and b/models/__pycache__/resnet.cpython-39.pyc differ diff --git a/models/__pycache__/tae.cpython-310.pyc b/models/__pycache__/tae.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..33a295fd4bb8232a5467bbe0e99e1cc983b636fa Binary files /dev/null and b/models/__pycache__/tae.cpython-310.pyc differ diff --git a/models/__pycache__/tae.cpython-312.pyc b/models/__pycache__/tae.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f040cdfca70dff798e87fbcd7a890c9b714a108 Binary files /dev/null and b/models/__pycache__/tae.cpython-312.pyc differ diff --git a/models/__pycache__/tae.cpython-38.pyc b/models/__pycache__/tae.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f5e23ec706cd256edf94e3a29cb214abc6f3f2b1 Binary files /dev/null and b/models/__pycache__/tae.cpython-38.pyc differ diff --git a/models/__pycache__/tae.cpython-39.pyc b/models/__pycache__/tae.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c500ca7016cb20062cdc79471c79189da863ef07 Binary files /dev/null and b/models/__pycache__/tae.cpython-39.pyc differ diff --git a/models/causal_cnn.py b/models/causal_cnn.py new file mode 100644 index 0000000000000000000000000000000000000000..64e4a374016be505d84c25fe4d654004e64576ba --- /dev/null +++ b/models/causal_cnn.py @@ -0,0 +1,108 @@ +import torch +import torch.nn as nn +from models.resnet import CausalResnet1D + + +class CausalConv1d(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1): + super(CausalConv1d, self).__init__() + self.pad = (kernel_size - 1) * dilation + (1 - stride) + self.conv = nn.Conv1d( + in_channels, + out_channels, + kernel_size, + stride=stride, + padding=0, # no padding here + dilation=dilation + ) + + def forward(self, x): + x = nn.functional.pad(x, (self.pad, 0)) # only pad on the left + return self.conv(x) + + +class CausalEncoder(nn.Module): + def __init__(self, + input_emb_width = 272, + hidden_size = 1024, + down_t = 2, + stride_t = 2, + width = 1024, + depth = 3, + dilation_growth_rate = 3, + activation='relu', + norm=None, + latent_dim=16, + clip_range = [] + ): + super().__init__() + self.clip_range = clip_range + self.proj = nn.Linear(width, latent_dim*2) + + blocks = [] + filter_t, pad_t = stride_t * 2, stride_t // 2 + + + blocks.append(CausalConv1d(input_emb_width, width, 3, 1, 1)) + blocks.append(nn.ReLU()) + + for i in range(down_t): + input_dim = width + block = nn.Sequential( + CausalConv1d(input_dim, width, filter_t, stride_t, 1), + CausalResnet1D(width, depth, dilation_growth_rate, activation=activation, norm=norm), + ) + blocks.append(block) + blocks.append(CausalConv1d(width, hidden_size, 3, 1, 1)) + self.model = nn.Sequential(*blocks) + + def reparameterize(self, mu, logvar): + std = torch.exp(0.5 * logvar) + eps = torch.randn_like(std) + return mu + eps * std + + def forward(self, x): + x = self.model(x) + x = x.transpose(1, 2) + x = self.proj(x) + mu, logvar = x.chunk(2, dim=2) + logvar = torch.clamp(logvar, self.clip_range[0], self.clip_range[1]) + z = self.reparameterize(mu, logvar) + + return z, mu, logvar + +class CausalDecoder(nn.Module): + def __init__(self, + input_emb_width = 272, + hidden_size = 1024, + down_t = 2, + stride_t = 2, + width = 1024, + depth = 3, + dilation_growth_rate = 3, + activation='relu', + norm=None + ): + super().__init__() + blocks = [] + + filter_t, pad_t = stride_t * 2, stride_t // 2 + blocks.append(CausalConv1d(hidden_size, width, 3, 1, 1)) + blocks.append(nn.ReLU()) + for i in range(down_t): + out_dim = width + block = nn.Sequential( + CausalResnet1D(width, depth, dilation_growth_rate, reverse_dilation=True, 
activation=activation, norm=norm), + nn.Upsample(scale_factor=2, mode='nearest'), + CausalConv1d(width, out_dim, 3, 1, 1) + ) + blocks.append(block) + blocks.append(CausalConv1d(width, width, 3, 1, 1)) + blocks.append(nn.ReLU()) + blocks.append(CausalConv1d(width, input_emb_width, 3, 1, 1)) + + self.model = nn.Sequential(*blocks) + + def forward(self, z): + z = z.transpose(1, 2) + return self.model(z) diff --git a/models/diffloss.py b/models/diffloss.py new file mode 100644 index 0000000000000000000000000000000000000000..d51cedf219aba4d4d0f51ef4a16599ad1a627b2b --- /dev/null +++ b/models/diffloss.py @@ -0,0 +1,258 @@ +import torch +import torch.nn as nn +from torch.utils.checkpoint import checkpoint +import math +from timm.layers.mlp import SwiGLU +from models.diffusion import create_diffusion + + +class DiffLoss(nn.Module): + """Diffusion Loss""" + def __init__(self, target_channels, z_channels, depth, width, num_sampling_steps, grad_checkpointing=False, learn_sigma=False): + super(DiffLoss, self).__init__() + self.in_channels = target_channels + self.net = SimpleMLPAdaLN( + in_channels=target_channels, + model_channels=width, + out_channels=target_channels * 2 if learn_sigma else target_channels, + z_channels=z_channels, + num_res_blocks=depth, + grad_checkpointing=grad_checkpointing + ) + + self.train_diffusion = create_diffusion(timestep_respacing="", noise_schedule="cosine") + self.gen_diffusion = create_diffusion(timestep_respacing=num_sampling_steps, noise_schedule="cosine") + + def forward(self, target, z, mask=None): + t = torch.randint(0, self.train_diffusion.num_timesteps, (target.shape[0],), device=target.device) + model_kwargs = dict(c=z) + loss_dict = self.train_diffusion.training_losses(self.net, target, t, model_kwargs) + loss = loss_dict["loss"] + pred_xstart = loss_dict["pred_xstart"] + if mask is not None: + loss = (loss * mask).sum() / mask.sum() + return loss.mean(), pred_xstart + + def sample(self, z, temperature=1.0, cfg=1.0): + + if not cfg == 1.0: + noise = torch.randn(z.shape[0] // 2, self.in_channels).to(z.device) + noise = torch.cat([noise, noise], dim=0) + model_kwargs = dict(c=z, cfg_scale=cfg) + sample_fn = self.net.forward_with_cfg + else: + noise = torch.randn(z.shape[0], self.in_channels).to(z.device) + model_kwargs = dict(c=z) + sample_fn = self.net.forward + + sampled_token_latent = self.gen_diffusion.p_sample_loop( + sample_fn, noise.shape, noise, clip_denoised=False, model_kwargs=model_kwargs, progress=False, + temperature=temperature + ) + + return sampled_token_latent + + +def modulate(x, shift, scale): + return x * (1 + scale) + shift + + +class TimestepEmbedder(nn.Module): + """ + Embeds scalar timesteps into vector representations. + """ + def __init__(self, hidden_size, frequency_embedding_size=256): + super().__init__() + self.mlp = nn.Sequential( + nn.Linear(frequency_embedding_size, hidden_size, bias=True), + nn.SiLU(), + nn.Linear(hidden_size, hidden_size, bias=True), + ) + self.frequency_embedding_size = frequency_embedding_size + + @staticmethod + def timestep_embedding(t, dim, max_period=10000): + """ + Create sinusoidal timestep embeddings. + :param t: a 1-D Tensor of N indices, one per batch element. + These may be fractional. + :param dim: the dimension of the output. + :param max_period: controls the minimum frequency of the embeddings. + :return: an (N, D) Tensor of positional embeddings. 
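+ Illustrative example (not in the source): with dim=4 (half=2), the frequencies are exp(-ln(max_period) * [0, 1] / 2) = [1.0, 0.01], so t=0 maps to [cos 0, cos 0, sin 0, sin 0] = [1, 1, 0, 0].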
+ """ + + half = dim // 2 + freqs = torch.exp( + -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half + ).to(device=t.device) + args = t[:, None].float() * freqs[None] + embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) + if dim % 2: + embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1) + return embedding + + def forward(self, t): + t_freq = self.timestep_embedding(t, self.frequency_embedding_size) + t_emb = self.mlp(t_freq) + return t_emb + + +class ResBlock(nn.Module): + """ + A residual block that can optionally change the number of channels. + :param channels: the number of input channels. + """ + + def __init__( + self, + channels + ): + super().__init__() + self.channels = channels + + self.in_ln = nn.LayerNorm(channels, eps=1e-6) + + self.mlp = nn.Sequential( + nn.Linear(channels, channels, bias=True), + nn.SiLU(), + nn.Linear(channels, channels, bias=True), + ) + + + self.adaLN_modulation = nn.Sequential( + nn.SiLU(), + nn.Linear(channels, 3 * channels, bias=True) + ) + + def forward(self, x, y): + shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(y).chunk(3, dim=-1) + h = modulate(self.in_ln(x), shift_mlp, scale_mlp) + h = self.mlp(h) + return x + gate_mlp * h + + +class FinalLayer(nn.Module): + """ + The final layer adopted from DiT. + """ + def __init__(self, model_channels, out_channels): + super().__init__() + self.norm_final = nn.LayerNorm(model_channels, elementwise_affine=False, eps=1e-6) + self.linear = nn.Linear(model_channels, out_channels, bias=True) + + self.adaLN_modulation = nn.Sequential( + nn.SiLU(), + nn.Linear(model_channels, 2 * model_channels, bias=True) + ) + + def forward(self, x, c): + shift, scale = self.adaLN_modulation(c).chunk(2, dim=-1) + x = modulate(self.norm_final(x), shift, scale) + x = self.linear(x) + return x + + +class SimpleMLPAdaLN(nn.Module): + """ + The MLP for Diffusion Loss. + :param in_channels: channels in the input Tensor. + :param model_channels: base channel count for the model. + :param out_channels: channels in the output Tensor. + :param z_channels: channels in the condition. + :param num_res_blocks: number of residual blocks per downsample. 
+ """ + + def __init__( + self, + in_channels, + model_channels, + out_channels, + z_channels, + num_res_blocks, + grad_checkpointing=False + ): + super().__init__() + + self.in_channels = in_channels + self.model_channels = model_channels + self.out_channels = out_channels + self.num_res_blocks = num_res_blocks + self.grad_checkpointing = grad_checkpointing + + self.time_embed = TimestepEmbedder(model_channels) + self.cond_embed = nn.Linear(z_channels, model_channels) + + self.input_proj = nn.Linear(in_channels, model_channels) + + res_blocks = [] + for i in range(num_res_blocks): + res_blocks.append(ResBlock( + model_channels + )) + + self.res_blocks = nn.ModuleList(res_blocks) + self.final_layer = FinalLayer(model_channels, out_channels) + + self.initialize_weights() + + def initialize_weights(self): + def _basic_init(module): + if isinstance(module, nn.Linear): + torch.nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + nn.init.constant_(module.bias, 0) + self.apply(_basic_init) + + # Initialize timestep embedding MLP + nn.init.normal_(self.time_embed.mlp[0].weight, std=0.02) + nn.init.normal_(self.time_embed.mlp[2].weight, std=0.02) + + # Zero-out adaLN modulation layers + + for block in self.res_blocks: + nn.init.constant_(block.adaLN_modulation[-1].weight, 0) + nn.init.constant_(block.adaLN_modulation[-1].bias, 0) + + nn.init.constant_(self.final_layer.adaLN_modulation[-1].weight, 0) + nn.init.constant_(self.final_layer.adaLN_modulation[-1].bias, 0) + nn.init.constant_(self.final_layer.linear.weight, 0) + nn.init.constant_(self.final_layer.linear.bias, 0) + + def forward(self, x, t, c): + """ + Apply the model to an input batch. + :param x: an [N x C] Tensor of inputs. + :param t: a 1-D batch of timesteps. + :param c: conditioning from AR transformer. + :return: an [N x C] Tensor of outputs. + """ + + + + x = x.float() + + x = self.input_proj(x) + t = self.time_embed(t) + c = self.cond_embed(c) + + + y = t + c + + if self.grad_checkpointing and not torch.jit.is_scripting(): + for block in self.res_blocks: + x = checkpoint(block, x, y) + else: + for block in self.res_blocks: + x = block(x, y) + + return self.final_layer(x, y) + + def forward_with_cfg(self, x, t, c, cfg_scale): + half = x[: len(x) // 2] + combined = torch.cat([half, half], dim=0) + model_out = self.forward(combined, t, c) + eps, rest = model_out[:, :self.in_channels], model_out[:, self.in_channels:] + cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0) + half_eps = uncond_eps + cfg_scale * (cond_eps - uncond_eps) + eps = torch.cat([half_eps, half_eps], dim=0) + return torch.cat([eps, rest], dim=1) diff --git a/models/diffusion/__init__.py b/models/diffusion/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4210525a4ea1db05b7541e3a3ed807b75730171b --- /dev/null +++ b/models/diffusion/__init__.py @@ -0,0 +1,47 @@ +# Adopted from DiT, which is modified from OpenAI's diffusion repos +# DiT: https://github.com/facebookresearch/DiT/diffusion +# GLIDE: https://github.com/openai/glide-text2im/blob/main/glide_text2im/gaussian_diffusion.py +# ADM: https://github.com/openai/guided-diffusion/blob/main/guided_diffusion +# IDDPM: https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py + +from . 
import gaussian_diffusion as gd +from .respace import SpacedDiffusion, space_timesteps + + +def create_diffusion( + timestep_respacing, + noise_schedule="linear", + use_kl=False, + sigma_small=True, + predict_xstart=False, + learn_sigma=False, + rescale_learned_sigmas=False, + diffusion_steps=50 +): + betas = gd.get_named_beta_schedule(noise_schedule, diffusion_steps) + if use_kl: + loss_type = gd.LossType.RESCALED_KL + elif rescale_learned_sigmas: + loss_type = gd.LossType.RESCALED_MSE + else: + loss_type = gd.LossType.MSE + if timestep_respacing is None or timestep_respacing == "": + timestep_respacing = [diffusion_steps] + return SpacedDiffusion( + use_timesteps=space_timesteps(diffusion_steps, timestep_respacing), + betas=betas, + model_mean_type=( + gd.ModelMeanType.EPSILON if not predict_xstart else gd.ModelMeanType.START_X + ), + model_var_type=( + ( + gd.ModelVarType.FIXED_LARGE + if not sigma_small + else gd.ModelVarType.FIXED_SMALL + ) + if not learn_sigma + else gd.ModelVarType.LEARNED_RANGE + ), + loss_type=loss_type + # rescale_timesteps=rescale_timesteps, + ) diff --git a/models/diffusion/__pycache__/__init__.cpython-310.pyc b/models/diffusion/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..901ed66ef05dddf04f601a689b23a7206314910b Binary files /dev/null and b/models/diffusion/__pycache__/__init__.cpython-310.pyc differ diff --git a/models/diffusion/__pycache__/__init__.cpython-312.pyc b/models/diffusion/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e10d6e6127bcd5a948203dcc32e2c5643a0bcac Binary files /dev/null and b/models/diffusion/__pycache__/__init__.cpython-312.pyc differ diff --git a/models/diffusion/__pycache__/__init__.cpython-38.pyc b/models/diffusion/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..228e51b6a4255ee7622a95dc33a9a1dfb9319588 Binary files /dev/null and b/models/diffusion/__pycache__/__init__.cpython-38.pyc differ diff --git a/models/diffusion/__pycache__/__init__.cpython-39.pyc b/models/diffusion/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..217d3e78463399b0a789ab44ec83aed9fb3d99d3 Binary files /dev/null and b/models/diffusion/__pycache__/__init__.cpython-39.pyc differ diff --git a/models/diffusion/__pycache__/diffusion_utils.cpython-310.pyc b/models/diffusion/__pycache__/diffusion_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd3ce6bebeeb85cfea150aa29766c45ee8da544f Binary files /dev/null and b/models/diffusion/__pycache__/diffusion_utils.cpython-310.pyc differ diff --git a/models/diffusion/__pycache__/diffusion_utils.cpython-312.pyc b/models/diffusion/__pycache__/diffusion_utils.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca293dbdf9b81d4f89e330377ef6f1524fa404fe Binary files /dev/null and b/models/diffusion/__pycache__/diffusion_utils.cpython-312.pyc differ diff --git a/models/diffusion/__pycache__/diffusion_utils.cpython-38.pyc b/models/diffusion/__pycache__/diffusion_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33b9ee7374348039011e87992f52f26475cec55d Binary files /dev/null and b/models/diffusion/__pycache__/diffusion_utils.cpython-38.pyc differ diff --git a/models/diffusion/__pycache__/diffusion_utils.cpython-39.pyc b/models/diffusion/__pycache__/diffusion_utils.cpython-39.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..50f454368f8edc62cad425dc066e8deae1ce33a9 Binary files /dev/null and b/models/diffusion/__pycache__/diffusion_utils.cpython-39.pyc differ diff --git a/models/diffusion/__pycache__/gaussian_diffusion.cpython-310.pyc b/models/diffusion/__pycache__/gaussian_diffusion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bba2ac36dfa789fd6fe42b2b0ec9ade5a8dd0448 Binary files /dev/null and b/models/diffusion/__pycache__/gaussian_diffusion.cpython-310.pyc differ diff --git a/models/diffusion/__pycache__/gaussian_diffusion.cpython-312.pyc b/models/diffusion/__pycache__/gaussian_diffusion.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..122356bb1e1d92b5475ceb7713c25dd791375eb3 Binary files /dev/null and b/models/diffusion/__pycache__/gaussian_diffusion.cpython-312.pyc differ diff --git a/models/diffusion/__pycache__/gaussian_diffusion.cpython-38.pyc b/models/diffusion/__pycache__/gaussian_diffusion.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91dda8de1e7375bf0bd701b82fbb1f0797645c19 Binary files /dev/null and b/models/diffusion/__pycache__/gaussian_diffusion.cpython-38.pyc differ diff --git a/models/diffusion/__pycache__/gaussian_diffusion.cpython-39.pyc b/models/diffusion/__pycache__/gaussian_diffusion.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1dcc24ad1b49b55626aba775163c282b6496f20 Binary files /dev/null and b/models/diffusion/__pycache__/gaussian_diffusion.cpython-39.pyc differ diff --git a/models/diffusion/__pycache__/respace.cpython-310.pyc b/models/diffusion/__pycache__/respace.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e84dcade920dcbc73b7235b6f363cbdf3d151ba9 Binary files /dev/null and b/models/diffusion/__pycache__/respace.cpython-310.pyc differ diff --git a/models/diffusion/__pycache__/respace.cpython-312.pyc b/models/diffusion/__pycache__/respace.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b738fbff403de9de09e195253b4677fa24eeeab Binary files /dev/null and b/models/diffusion/__pycache__/respace.cpython-312.pyc differ diff --git a/models/diffusion/__pycache__/respace.cpython-38.pyc b/models/diffusion/__pycache__/respace.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a1e52a84556c2ff542e6521d90e2bcc6ecaba9e Binary files /dev/null and b/models/diffusion/__pycache__/respace.cpython-38.pyc differ diff --git a/models/diffusion/__pycache__/respace.cpython-39.pyc b/models/diffusion/__pycache__/respace.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16566b188e06be95760a35c6dfee536225519725 Binary files /dev/null and b/models/diffusion/__pycache__/respace.cpython-39.pyc differ diff --git a/models/diffusion/diffusion_utils.py b/models/diffusion/diffusion_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..86ef6a9ed81767302fbe36900013dfe1abaf25f4 --- /dev/null +++ b/models/diffusion/diffusion_utils.py @@ -0,0 +1,73 @@ +# Modified from OpenAI's diffusion repos +# GLIDE: https://github.com/openai/glide-text2im/blob/main/glide_text2im/gaussian_diffusion.py +# ADM: https://github.com/openai/guided-diffusion/blob/main/guided_diffusion +# IDDPM: https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py + +import torch as th +import numpy as np + + +def normal_kl(mean1, logvar1, mean2, 
logvar2): + """ + Compute the KL divergence between two gaussians. + Shapes are automatically broadcasted, so batches can be compared to + scalars, among other use cases. + """ + tensor = None + for obj in (mean1, logvar1, mean2, logvar2): + if isinstance(obj, th.Tensor): + tensor = obj + break + assert tensor is not None, "at least one argument must be a Tensor" + + # Force variances to be Tensors. Broadcasting helps convert scalars to + # Tensors, but it does not work for th.exp(). + logvar1, logvar2 = [ + x if isinstance(x, th.Tensor) else th.tensor(x).to(tensor) + for x in (logvar1, logvar2) + ] + + return 0.5 * ( + -1.0 + + logvar2 + - logvar1 + + th.exp(logvar1 - logvar2) + + ((mean1 - mean2) ** 2) * th.exp(-logvar2) + ) + + +def approx_standard_normal_cdf(x): + """ + A fast approximation of the cumulative distribution function of the + standard normal. + """ + return 0.5 * (1.0 + th.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * th.pow(x, 3)))) + + +def discretized_gaussian_log_likelihood(x, *, means, log_scales): + """ + Compute the log-likelihood of a Gaussian distribution discretizing to a + given image. + :param x: the target images. It is assumed that this was uint8 values, + rescaled to the range [-1, 1]. + :param means: the Gaussian mean Tensor. + :param log_scales: the Gaussian log stddev Tensor. + :return: a tensor like x of log probabilities (in nats). + """ + assert x.shape == means.shape == log_scales.shape + centered_x = x - means + inv_stdv = th.exp(-log_scales) + plus_in = inv_stdv * (centered_x + 1.0 / 255.0) + cdf_plus = approx_standard_normal_cdf(plus_in) + min_in = inv_stdv * (centered_x - 1.0 / 255.0) + cdf_min = approx_standard_normal_cdf(min_in) + log_cdf_plus = th.log(cdf_plus.clamp(min=1e-12)) + log_one_minus_cdf_min = th.log((1.0 - cdf_min).clamp(min=1e-12)) + cdf_delta = cdf_plus - cdf_min + log_probs = th.where( + x < -0.999, + log_cdf_plus, + th.where(x > 0.999, log_one_minus_cdf_min, th.log(cdf_delta.clamp(min=1e-12))), + ) + assert log_probs.shape == x.shape + return log_probs diff --git a/models/diffusion/gaussian_diffusion.py b/models/diffusion/gaussian_diffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..c03301b925e2976c7eb3a269bf4fb10a9f90bb0f --- /dev/null +++ b/models/diffusion/gaussian_diffusion.py @@ -0,0 +1,917 @@ +# Modified from OpenAI's diffusion repos +# GLIDE: https://github.com/openai/glide-text2im/blob/main/glide_text2im/gaussian_diffusion.py +# ADM: https://github.com/openai/guided-diffusion/blob/main/guided_diffusion +# IDDPM: https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py + + +import math + +import numpy as np +import torch as th +import enum + +from .diffusion_utils import discretized_gaussian_log_likelihood, normal_kl + + +def mean_flat(tensor): + """ + Take the mean over all non-batch dimensions. + """ + return tensor.mean(dim=list(range(1, len(tensor.shape)))) # tensor.shape = torch.Size([124, 512]) + + +class ModelMeanType(enum.Enum): + """ + Which type of output the model predicts. + """ + + PREVIOUS_X = enum.auto() # the model predicts x_{t-1} + START_X = enum.auto() # the model predicts x_0 + EPSILON = enum.auto() # the model predicts epsilon + + +class ModelVarType(enum.Enum): + """ + What is used as the model's output variance. + The LEARNED_RANGE option has been added to allow the model to predict + values between FIXED_SMALL and FIXED_LARGE, making its job easier. 
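+ With LEARNED_RANGE the model emits one value v in [-1, 1] per channel, and the log-variance is interpolated as frac * log(beta_t) + (1 - frac) * log(posterior_variance_t) with frac = (v + 1) / 2 (see p_mean_variance below).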
+ """ + + LEARNED = enum.auto() + FIXED_SMALL = enum.auto() + FIXED_LARGE = enum.auto() + LEARNED_RANGE = enum.auto() + + +class LossType(enum.Enum): + MSE = enum.auto() # use raw MSE loss (and KL when learning variances) + RESCALED_MSE = ( + enum.auto() + ) # use raw MSE loss (with RESCALED_KL when learning variances) + KL = enum.auto() # use the variational lower-bound + RESCALED_KL = enum.auto() # like KL, but rescale to estimate the full VLB + + def is_vb(self): + return self == LossType.KL or self == LossType.RESCALED_KL + + +def _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, warmup_frac): + betas = beta_end * np.ones(num_diffusion_timesteps, dtype=np.float64) + warmup_time = int(num_diffusion_timesteps * warmup_frac) + betas[:warmup_time] = np.linspace(beta_start, beta_end, warmup_time, dtype=np.float64) + return betas + + +def get_beta_schedule(beta_schedule, *, beta_start, beta_end, num_diffusion_timesteps): + """ + This is the deprecated API for creating beta schedules. + See get_named_beta_schedule() for the new library of schedules. + """ + if beta_schedule == "quad": + betas = ( + np.linspace( + beta_start ** 0.5, + beta_end ** 0.5, + num_diffusion_timesteps, + dtype=np.float64, + ) + ** 2 + ) + elif beta_schedule == "linear": + betas = np.linspace(beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64) + elif beta_schedule == "warmup10": + betas = _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, 0.1) + elif beta_schedule == "warmup50": + betas = _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, 0.5) + elif beta_schedule == "const": + betas = beta_end * np.ones(num_diffusion_timesteps, dtype=np.float64) + elif beta_schedule == "jsd": # 1/T, 1/(T-1), 1/(T-2), ..., 1 + betas = 1.0 / np.linspace( + num_diffusion_timesteps, 1, num_diffusion_timesteps, dtype=np.float64 + ) + else: + raise NotImplementedError(beta_schedule) + assert betas.shape == (num_diffusion_timesteps,) + return betas + + +def get_named_beta_schedule(schedule_name, num_diffusion_timesteps): + """ + Get a pre-defined beta schedule for the given name. + The beta schedule library consists of beta schedules which remain similar + in the limit of num_diffusion_timesteps. + Beta schedules may be added, but should not be removed or changed once + they are committed to maintain backwards compatibility. + """ + if schedule_name == "linear": + # Linear schedule from Ho et al, extended to work for any number of + # diffusion steps. + scale = 1000 / num_diffusion_timesteps + return get_beta_schedule( + "linear", + beta_start=scale * 0.0001, + beta_end=scale * 0.02, + num_diffusion_timesteps=num_diffusion_timesteps, + ) + elif schedule_name == "cosine": + return betas_for_alpha_bar( + num_diffusion_timesteps, + lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2, + ) + else: + raise NotImplementedError(f"unknown beta schedule: {schedule_name}") + + +def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, + which defines the cumulative product of (1-beta) over time from t = [0,1]. + :param num_diffusion_timesteps: the number of betas to produce. + :param alpha_bar: a lambda that takes an argument t from 0 to 1 and + produces the cumulative product of (1-beta) up to that + part of the diffusion process. + :param max_beta: the maximum beta to use; use values lower than 1 to + prevent singularities. 
+ """ + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) + return np.array(betas) + + + + +class GaussianDiffusion: + """ + Utilities for training and sampling diffusion models. + Original ported from this codebase: + https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42 + :param betas: a 1-D numpy array of betas for each diffusion timestep, + starting at T and going to 1. + """ + + def __init__( + self, + *, + betas, + model_mean_type, + model_var_type, + loss_type + ): + + self.model_mean_type = model_mean_type + self.model_var_type = model_var_type + self.loss_type = loss_type + + # Use float64 for accuracy. + betas = np.array(betas, dtype=np.float64) + self.betas = betas + assert len(betas.shape) == 1, "betas must be 1-D" + assert (betas > 0).all() and (betas <= 1).all() + + self.num_timesteps = int(betas.shape[0]) + + alphas = 1.0 - betas + self.alphas_cumprod = np.cumprod(alphas, axis=0) + self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:-1]) + self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0) + assert self.alphas_cumprod_prev.shape == (self.num_timesteps,) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod) + self.sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - self.alphas_cumprod) + self.log_one_minus_alphas_cumprod = np.log(1.0 - self.alphas_cumprod) + self.sqrt_recip_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod) + self.sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod - 1) + + # calculations for posterior q(x_{t-1} | x_t, x_0) + self.posterior_variance = ( + betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod) + ) + # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain + self.posterior_log_variance_clipped = np.log( + np.append(self.posterior_variance[1], self.posterior_variance[1:]) + ) if len(self.posterior_variance) > 1 else np.array([]) + + self.posterior_mean_coef1 = ( + betas * np.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod) + ) + self.posterior_mean_coef2 = ( + (1.0 - self.alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - self.alphas_cumprod) + ) + + def q_mean_variance(self, x_start, t): + """ + Get the distribution q(x_t | x_0). + :param x_start: the [N x C x ...] tensor of noiseless inputs. + :param t: the number of diffusion steps (minus 1). Here, 0 means one step. + :return: A tuple (mean, variance, log_variance), all of x_start's shape. + """ + mean = _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + variance = _extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) + log_variance = _extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) + return mean, variance, log_variance + + def q_sample(self, x_start, t, noise=None): + """ + Diffuse the data for a given number of diffusion steps. + In other words, sample from q(x_t | x_0). + :param x_start: the initial data batch. + :param t: the number of diffusion steps (minus 1). Here, 0 means one step. + :param noise: if specified, the split-out normal noise. + :return: A noisy version of x_start. 
+ """ + if noise is None: + noise = th.randn_like(x_start) + assert noise.shape == x_start.shape + return ( + _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + + _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise + ) + + def q_posterior_mean_variance(self, x_start, x_t, t): + """ + Compute the mean and variance of the diffusion posterior: + q(x_{t-1} | x_t, x_0) + """ + assert x_start.shape == x_t.shape + posterior_mean = ( + _extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + + _extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t + ) + posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape) + posterior_log_variance_clipped = _extract_into_tensor( + self.posterior_log_variance_clipped, t, x_t.shape + ) + assert ( + posterior_mean.shape[0] + == posterior_variance.shape[0] + == posterior_log_variance_clipped.shape[0] + == x_start.shape[0] + ) + return posterior_mean, posterior_variance, posterior_log_variance_clipped + + def p_mean_variance(self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None): + """ + Apply the model to get p(x_{t-1} | x_t), as well as a prediction of + the initial x, x_0. + :param model: the model, which takes a signal and a batch of timesteps + as input. + :param x: the [N x C x ...] tensor at time t. + :param t: a 1-D Tensor of timesteps. + :param clip_denoised: if True, clip the denoised signal into [-1, 1]. + :param denoised_fn: if not None, a function which applies to the + x_start prediction before it is used to sample. Applies before + clip_denoised. + :param model_kwargs: if not None, a dict of extra keyword arguments to + pass to the model. This can be used for conditioning. + :return: a dict with the following keys: + - 'mean': the model mean output. + - 'variance': the model variance output. + - 'log_variance': the log of 'variance'. + - 'pred_xstart': the prediction for x_0. + """ + if model_kwargs is None: + model_kwargs = {} + + B, C = x.shape[:2] + assert t.shape == (B,) + model_output = model(x, t, **model_kwargs) # 调用forward_withcfg函数得到 torch.Size([2, 512]) + if isinstance(model_output, tuple): + model_output, extra = model_output + else: + extra = None + + if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]: + assert model_output.shape == (B, C * 2, *x.shape[2:]) + model_output, model_var_values = th.split(model_output, C, dim=1) + min_log = _extract_into_tensor(self.posterior_log_variance_clipped, t, x.shape) + max_log = _extract_into_tensor(np.log(self.betas), t, x.shape) + # The model_var_values is [-1, 1] for [min_var, max_var]. + frac = (model_var_values + 1) / 2 + model_log_variance = frac * max_log + (1 - frac) * min_log + model_variance = th.exp(model_log_variance) + else: + model_variance, model_log_variance = { + # for fixedlarge, we set the initial (log-)variance like so + # to get a better decoder log likelihood. 
+ ModelVarType.FIXED_LARGE: ( + np.append(self.posterior_variance[1], self.betas[1:]), + np.log(np.append(self.posterior_variance[1], self.betas[1:])), + ), + ModelVarType.FIXED_SMALL: ( + self.posterior_variance, + self.posterior_log_variance_clipped, + ), + }[self.model_var_type] # ModelVarType.FIXED_SMALL + model_variance = _extract_into_tensor(model_variance, t, x.shape) + model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape) + + def process_xstart(x): + if denoised_fn is not None: + x = denoised_fn(x) + if clip_denoised: + return x.clamp(-1, 1) + return x + + + if self.model_mean_type == ModelMeanType.START_X: + pred_xstart = process_xstart(model_output) + else: # here + pred_xstart = process_xstart( + self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output) + ) + model_mean, _, _ = self.q_posterior_mean_variance(x_start=pred_xstart, x_t=x, t=t) + + # print(f't: {t}; Out: {model_output[0,:5]}, Mean: {model_mean[0,:5]}, x_0: {pred_xstart[0,:5]}') + assert model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape + return { + "mean": model_mean, + "variance": model_variance, + "log_variance": model_log_variance, + "pred_xstart": pred_xstart, + "extra": extra, + } + + def _predict_xstart_from_eps(self, x_t, t, eps): + assert x_t.shape == eps.shape + return ( + _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t + - _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps + ) + + def _predict_eps_from_xstart(self, x_t, t, pred_xstart): + return ( + _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart + ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) + + def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None): + """ + Compute the mean for the previous step, given a function cond_fn that + computes the gradient of a conditional log probability with respect to + x. In particular, cond_fn computes grad(log(p(y|x))), and we want to + condition on y. + This uses the conditioning strategy from Sohl-Dickstein et al. (2015). + """ + gradient = cond_fn(x, t, **model_kwargs) + new_mean = p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float() + return new_mean + + def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None): + """ + Compute what the p_mean_variance output would have been, should the + model's score function be conditioned by cond_fn. + See condition_mean() for details on cond_fn. + Unlike condition_mean(), this instead uses the conditioning strategy + from Song et al (2020). + """ + alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape) + + eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"]) + eps = eps - (1 - alpha_bar).sqrt() * cond_fn(x, t, **model_kwargs) + + out = p_mean_var.copy() + out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps) + out["mean"], _, _ = self.q_posterior_mean_variance(x_start=out["pred_xstart"], x_t=x, t=t) + return out + + def p_sample( + self, + model, + x, + t, + clip_denoised=True, + denoised_fn=None, + cond_fn=None, + model_kwargs=None, + temperature=1.0 + ): + """ + Sample x_{t-1} from the model at the given timestep. + :param model: the model to sample from. + :param x: the current tensor at x_{t-1}. + :param t: the value of t, starting at 0 for the first diffusion step. + :param clip_denoised: if True, clip the x_start prediction to [-1, 1]. 
+ :param denoised_fn: if not None, a function which applies to the + x_start prediction before it is used to sample. + :param cond_fn: if not None, this is a gradient function that acts + similarly to the model. + :param model_kwargs: if not None, a dict of extra keyword arguments to + pass to the model. This can be used for conditioning. + :param temperature: temperature scaling during Diff Loss sampling. + :return: a dict containing the following keys: + - 'sample': a random sample from the model. + - 'pred_xstart': a prediction of x_0. + """ + out = self.p_mean_variance( + model, + x, + t, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + model_kwargs=model_kwargs, + ) + noise = th.randn_like(x) + nonzero_mask = ( + (t != 0).float().view(-1, *([1] * (len(x.shape) - 1))) + ) # no noise when t == 0 + if cond_fn is not None: + out["mean"] = self.condition_mean(cond_fn, out, x, t, model_kwargs=model_kwargs) + # scale the noise by temperature + sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise * temperature + return {"sample": sample, "pred_xstart": out["pred_xstart"]} + + + # def generate_unconditionally(self, model, x_start, t, noise=None): + # """ + # Generate samples unconditionally using the given model. + + # :param model: the model to generate samples with. + # :param x_start: the [N x C x ...] tensor of inputs. + # :param t: a batch of timestep indices. + # :param noise: if specified, the specific Gaussian noise to use. + # :return: a tensor of generated samples. + # """ + # # Ensure model_kwargs is an empty dictionary for unconditional generation + # model_kwargs = {} + + # # Generate random noise if not provided + # if noise is None: + # noise = th.randn_like(x_start) + + # # Sample x_t using the q_sample function + # x_t = self.q_sample(x_start, t, noise=noise) + + # # Generate samples using the model without any conditions + # generated_samples = model(x_t, t, **model_kwargs) + + # return generated_samples + + def p_sample_loop( + self, + model, + shape, + noise=None, + clip_denoised=True, + denoised_fn=None, + cond_fn=None, + model_kwargs=None, + device=None, + progress=False, + temperature=1.0, + ): + """ + Generate samples from the model. + :param model: the model module. + :param shape: the shape of the samples, (N, C, H, W). + :param noise: if specified, the noise from the encoder to sample. + Should be of the same shape as `shape`. + :param clip_denoised: if True, clip x_start predictions to [-1, 1]. + :param denoised_fn: if not None, a function which applies to the + x_start prediction before it is used to sample. + :param cond_fn: if not None, this is a gradient function that acts + similarly to the model. + :param model_kwargs: if not None, a dict of extra keyword arguments to + pass to the model. This can be used for conditioning. + :param device: if specified, the device to create the samples on. + If not specified, use a model parameter's device. + :param progress: if True, show a tqdm progress bar. + :param temperature: temperature scaling during Diff Loss sampling. + :return: a non-differentiable batch of samples. 
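+ Typical call (sketch, mirroring DiffLoss.sample in models/diffloss.py): p_sample_loop(net.forward, noise.shape, noise, clip_denoised=False, model_kwargs=dict(c=z), temperature=temperature).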
+ """ + final = None + for sample in self.p_sample_loop_progressive( + model, + shape, + noise=noise, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + cond_fn=cond_fn, + model_kwargs=model_kwargs, + device=device, + progress=progress, + temperature=temperature, + ): + final = sample + + return final["sample"] + + def p_sample_loop_progressive( + self, + model, + shape, + noise=None, + clip_denoised=True, + denoised_fn=None, + cond_fn=None, + model_kwargs=None, + device=None, + progress=False, + temperature=1.0, + ): + """ + Generate samples from the model and yield intermediate samples from + each timestep of diffusion. + Arguments are the same as p_sample_loop(). + Returns a generator over dicts, where each dict is the return value of + p_sample(). + """ + assert isinstance(shape, (tuple, list)) + if noise is not None: + img = noise + else: + img = th.randn(*shape).cuda() + indices = list(range(self.num_timesteps))[::-1] + + if progress: + # Lazy import so that we don't depend on tqdm. + from tqdm.auto import tqdm + + indices = tqdm(indices) + + for i in indices: + t = th.tensor([i] * shape[0]).cuda() + with th.no_grad(): + out = self.p_sample( + model, + img, + t, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + cond_fn=cond_fn, + model_kwargs=model_kwargs, + temperature=temperature, + ) + yield out + img = out["sample"] + + def ddim_sample( + self, + model, + x, + t, + clip_denoised=True, + denoised_fn=None, + cond_fn=None, + model_kwargs=None, + eta=0.0, + ): + """ + Sample x_{t-1} from the model using DDIM. + Same usage as p_sample(). + """ + out = self.p_mean_variance( + model, + x, + t, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + model_kwargs=model_kwargs, + ) + if cond_fn is not None: + out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs) + + # Usually our model outputs epsilon, but we re-derive it + # in case we used x_start or x_prev prediction. + eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"]) + + alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape) + alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape) + sigma = ( + eta + * th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar)) + * th.sqrt(1 - alpha_bar / alpha_bar_prev) + ) + # Equation 12. + noise = th.randn_like(x) + mean_pred = ( + out["pred_xstart"] * th.sqrt(alpha_bar_prev) + + th.sqrt(1 - alpha_bar_prev - sigma ** 2) * eps + ) + nonzero_mask = ( + (t != 0).float().view(-1, *([1] * (len(x.shape) - 1))) + ) # no noise when t == 0 + sample = mean_pred + nonzero_mask * sigma * noise + return {"sample": sample, "pred_xstart": out["pred_xstart"]} + + def ddim_reverse_sample( + self, + model, + x, + t, + clip_denoised=True, + denoised_fn=None, + cond_fn=None, + model_kwargs=None, + eta=0.0, + ): + """ + Sample x_{t+1} from the model using DDIM reverse ODE. + """ + assert eta == 0.0, "Reverse ODE only for deterministic path" + out = self.p_mean_variance( + model, + x, + t, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + model_kwargs=model_kwargs, + ) + if cond_fn is not None: + out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs) + # Usually our model outputs epsilon, but we re-derive it + # in case we used x_start or x_prev prediction. 
+ eps = ( + _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x + - out["pred_xstart"] + ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape) + alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape) + + # Equation 12. reversed + mean_pred = out["pred_xstart"] * th.sqrt(alpha_bar_next) + th.sqrt(1 - alpha_bar_next) * eps + + return {"sample": mean_pred, "pred_xstart": out["pred_xstart"]} + + def ddim_sample_loop( + self, + model, + shape, + noise=None, + clip_denoised=True, + denoised_fn=None, + cond_fn=None, + model_kwargs=None, + device=None, + progress=False, + eta=0.0, + ): + """ + Generate samples from the model using DDIM. + Same usage as p_sample_loop(). + """ + final = None + for sample in self.ddim_sample_loop_progressive( + model, + shape, + noise=noise, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + cond_fn=cond_fn, + model_kwargs=model_kwargs, + device=device, + progress=progress, + eta=eta, + ): + final = sample + return final["sample"] + + def ddim_sample_loop_progressive( + self, + model, + shape, + noise=None, + clip_denoised=True, + denoised_fn=None, + cond_fn=None, + model_kwargs=None, + device=None, + progress=False, + eta=0.0, + ): + """ + Use DDIM to sample from the model and yield intermediate samples from + each timestep of DDIM. + Same usage as p_sample_loop_progressive(). + """ + assert isinstance(shape, (tuple, list)) + if noise is not None: + img = noise + else: + img = th.randn(*shape).cuda() + indices = list(range(self.num_timesteps))[::-1] + + if progress: + # Lazy import so that we don't depend on tqdm. + from tqdm.auto import tqdm + + indices = tqdm(indices) + + for i in indices: + t = th.tensor([i] * shape[0]).cuda() + with th.no_grad(): + out = self.ddim_sample( + model, + img, + t, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + cond_fn=cond_fn, + model_kwargs=model_kwargs, + eta=eta, + ) + yield out + img = out["sample"] + + def _vb_terms_bpd( + self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None + ): + """ + Get a term for the variational lower-bound. + The resulting units are bits (rather than nats, as one might expect). + This allows for comparison to other papers. + :return: a dict with the following keys: + - 'output': a shape [N] tensor of NLLs or KLs. + - 'pred_xstart': the x_0 predictions. + """ + true_mean, _, true_log_variance_clipped = self.q_posterior_mean_variance( + x_start=x_start, x_t=x_t, t=t + ) + out = self.p_mean_variance( + model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs + ) + kl = normal_kl( + true_mean, true_log_variance_clipped, out["mean"], out["log_variance"] + ) + kl = mean_flat(kl) / np.log(2.0) + + decoder_nll = -discretized_gaussian_log_likelihood( + x_start, means=out["mean"], log_scales=0.5 * out["log_variance"] + ) + assert decoder_nll.shape == x_start.shape + decoder_nll = mean_flat(decoder_nll) / np.log(2.0) + + # At the first timestep return the decoder NLL, + # otherwise return KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t)) + output = th.where((t == 0), decoder_nll, kl) + return {"output": output, "pred_xstart": out["pred_xstart"]} + + def training_losses(self, model, x_start, t, model_kwargs=None, noise=None): + """ + Compute training losses for a single timestep. + :param model: the model to evaluate loss on. + :param x_start: the [N x C x ...] tensor of inputs. + :param t: a batch of timestep indices. + :param model_kwargs: if not None, a dict of extra keyword arguments to + pass to the model. 
This can be used for conditioning. + :param noise: if specified, the specific Gaussian noise to try to remove. + :return: a dict with the key "loss" containing a tensor of shape [N]. + Some mean or variance settings may also have other keys. + """ + + if model_kwargs is None: + model_kwargs = {} + + if noise is None: + noise = th.randn_like(x_start) + x_t = self.q_sample(x_start, t, noise=noise) + + terms = {} + + if self.loss_type == LossType.KL or self.loss_type == LossType.RESCALED_KL: + terms["loss"] = self._vb_terms_bpd( + model=model, + x_start=x_start, + x_t=x_t, + t=t, + clip_denoised=False, + model_kwargs=model_kwargs, + )["output"] + if self.loss_type == LossType.RESCALED_KL: + terms["loss"] *= self.num_timesteps + elif self.loss_type == LossType.MSE or self.loss_type == LossType.RESCALED_MSE: + model_output = model(x_t, t, **model_kwargs) + + if self.model_var_type in [ + ModelVarType.LEARNED, + ModelVarType.LEARNED_RANGE, + ]: + B, C = x_t.shape[:2] + assert model_output.shape == (B, C * 2, *x_t.shape[2:]) + + model_output, model_var_values = th.split(model_output, C, dim=1) + # Learn the variance using the variational bound, but don't let + # it affect our mean prediction. + frozen_out = th.cat([model_output.detach(), model_var_values], dim=1) + terms["vb"] = self._vb_terms_bpd( + model=lambda *args, r=frozen_out: r, + x_start=x_start, + x_t=x_t, + t=t, + clip_denoised=False, + )["output"] + if self.loss_type == LossType.RESCALED_MSE: + # Divide by 1000 for equivalence with initial implementation. + # Without a factor of 1/1000, the VB term hurts the MSE term. + terms["vb"] *= self.num_timesteps / 1000.0 + + target = { + ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance( + x_start=x_start, x_t=x_t, t=t + )[0], + ModelMeanType.START_X: x_start, + ModelMeanType.EPSILON: noise, + }[self.model_mean_type] # PREVIOUS_X: the model predicts x_{t-1} + + + assert model_output.shape == target.shape == x_start.shape + terms["mse"] = mean_flat((target - model_output) ** 2) + if "vb" in terms: + terms["loss"] = terms["mse"] + terms["vb"] + else: + terms["loss"] = terms["mse"] + + + pred_xstart = self._predict_xstart_from_eps(x_t=x_t, t=t, eps=model_output) + terms["pred_xstart"] = pred_xstart + else: + raise NotImplementedError(self.loss_type) + + return terms + + def _prior_bpd(self, x_start): + """ + Get the prior KL term for the variational lower-bound, measured in + bits-per-dim. + This term can't be optimized, as it only depends on the encoder. + :param x_start: the [N x C x ...] tensor of inputs. + :return: a batch of [N] KL values (in bits), one per batch element. + """ + batch_size = x_start.shape[0] + t = th.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) + qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) + kl_prior = normal_kl( + mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0 + ) + return mean_flat(kl_prior) / np.log(2.0) + + def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None): + """ + Compute the entire variational lower-bound, measured in bits-per-dim, + as well as other related quantities. + :param model: the model to evaluate loss on. + :param x_start: the [N x C x ...] tensor of inputs. + :param clip_denoised: if True, clip denoised samples. + :param model_kwargs: if not None, a dict of extra keyword arguments to + pass to the model. This can be used for conditioning. + :return: a dict containing the following keys: + - total_bpd: the total variational lower-bound, per batch element. 
+ - prior_bpd: the prior term in the lower-bound. + - vb: an [N x T] tensor of terms in the lower-bound. + - xstart_mse: an [N x T] tensor of x_0 MSEs for each timestep. + - mse: an [N x T] tensor of epsilon MSEs for each timestep. + """ + device = x_start.device + batch_size = x_start.shape[0] + + vb = [] + xstart_mse = [] + mse = [] + for t in list(range(self.num_timesteps))[::-1]: + t_batch = th.tensor([t] * batch_size, device=device) + noise = th.randn_like(x_start) + x_t = self.q_sample(x_start=x_start, t=t_batch, noise=noise) + # Calculate VLB term at the current timestep + with th.no_grad(): + out = self._vb_terms_bpd( + model, + x_start=x_start, + x_t=x_t, + t=t_batch, + clip_denoised=clip_denoised, + model_kwargs=model_kwargs, + ) + vb.append(out["output"]) + xstart_mse.append(mean_flat((out["pred_xstart"] - x_start) ** 2)) + eps = self._predict_eps_from_xstart(x_t, t_batch, out["pred_xstart"]) + mse.append(mean_flat((eps - noise) ** 2)) + + vb = th.stack(vb, dim=1) + xstart_mse = th.stack(xstart_mse, dim=1) + mse = th.stack(mse, dim=1) + + prior_bpd = self._prior_bpd(x_start) + total_bpd = vb.sum(dim=1) + prior_bpd + return { + "total_bpd": total_bpd, + "prior_bpd": prior_bpd, + "vb": vb, + "xstart_mse": xstart_mse, + "mse": mse, + } + + +def _extract_into_tensor(arr, timesteps, broadcast_shape): + """ + Extract values from a 1-D numpy array for a batch of indices. + :param arr: the 1-D numpy array. + :param timesteps: a tensor of indices into the array to extract. + :param broadcast_shape: a larger shape of K dimensions with the batch + dimension equal to the length of timesteps. + :return: a tensor of shape [batch_size, 1, ...] where the shape has K dims. + """ + res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float() + while len(res.shape) < len(broadcast_shape): + res = res[..., None] + return res + th.zeros(broadcast_shape, device=timesteps.device) diff --git a/models/diffusion/respace.py b/models/diffusion/respace.py new file mode 100644 index 0000000000000000000000000000000000000000..0a2cc0435d1ace54466585db9043b284973d454e --- /dev/null +++ b/models/diffusion/respace.py @@ -0,0 +1,129 @@ +# Modified from OpenAI's diffusion repos +# GLIDE: https://github.com/openai/glide-text2im/blob/main/glide_text2im/gaussian_diffusion.py +# ADM: https://github.com/openai/guided-diffusion/blob/main/guided_diffusion +# IDDPM: https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py + +import numpy as np +import torch as th + +from .gaussian_diffusion import GaussianDiffusion + + +def space_timesteps(num_timesteps, section_counts): + """ + Create a list of timesteps to use from an original diffusion process, + given the number of timesteps we want to take from equally-sized portions + of the original process. + For example, if there's 300 timesteps and the section counts are [10,15,20] + then the first 100 timesteps are strided to be 10 timesteps, the second 100 + are strided to be 15 timesteps, and the final 100 are strided to be 20. + If the stride is a string starting with "ddim", then the fixed striding + from the DDIM paper is used, and only one section is allowed. + :param num_timesteps: the number of diffusion steps in the original + process to divide up. + :param section_counts: either a list of numbers, or a string containing + comma-separated numbers, indicating the step count + per section. As a special case, use "ddimN" where N + is a number of steps to use the striding from the + DDIM paper. 
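+ For instance, "ddim25" selects 25 evenly strided timesteps from the original process (a ValueError is raised if no integer stride produces exactly that count).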
+ :return: a set of diffusion steps from the original process to use. + """ + if isinstance(section_counts, str): + if section_counts.startswith("ddim"): + desired_count = int(section_counts[len("ddim") :]) + for i in range(1, num_timesteps): + if len(range(0, num_timesteps, i)) == desired_count: + return set(range(0, num_timesteps, i)) + raise ValueError( + f"cannot create exactly {num_timesteps} steps with an integer stride" + ) + section_counts = [int(x) for x in section_counts.split(",")] + size_per = num_timesteps // len(section_counts) + extra = num_timesteps % len(section_counts) + start_idx = 0 + all_steps = [] + for i, section_count in enumerate(section_counts): + size = size_per + (1 if i < extra else 0) + if size < section_count: + raise ValueError( + f"cannot divide section of {size} steps into {section_count}" + ) + if section_count <= 1: + frac_stride = 1 + else: + frac_stride = (size - 1) / (section_count - 1) + cur_idx = 0.0 + taken_steps = [] + for _ in range(section_count): + taken_steps.append(start_idx + round(cur_idx)) + cur_idx += frac_stride + all_steps += taken_steps + start_idx += size + return set(all_steps) + + +class SpacedDiffusion(GaussianDiffusion): + """ + A diffusion process which can skip steps in a base diffusion process. + :param use_timesteps: a collection (sequence or set) of timesteps from the + original diffusion process to retain. + :param kwargs: the kwargs to create the base diffusion process. + """ + + def __init__(self, use_timesteps, **kwargs): + self.use_timesteps = set(use_timesteps) + self.timestep_map = [] + self.original_num_steps = len(kwargs["betas"]) + + base_diffusion = GaussianDiffusion(**kwargs) # pylint: disable=missing-kwoa + last_alpha_cumprod = 1.0 + new_betas = [] + for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod): + if i in self.use_timesteps: + new_betas.append(1 - alpha_cumprod / last_alpha_cumprod) + last_alpha_cumprod = alpha_cumprod + self.timestep_map.append(i) + kwargs["betas"] = np.array(new_betas) + super().__init__(**kwargs) + + def p_mean_variance( + self, model, *args, **kwargs + ): # pylint: disable=signature-differs + return super().p_mean_variance(self._wrap_model(model), *args, **kwargs) + + def training_losses( + self, model, *args, **kwargs + ): # pylint: disable=signature-differs + return super().training_losses(self._wrap_model(model), *args, **kwargs) + + def condition_mean(self, cond_fn, *args, **kwargs): + return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs) + + def condition_score(self, cond_fn, *args, **kwargs): + return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs) + + def _wrap_model(self, model): + if isinstance(model, _WrappedModel): + return model + return _WrappedModel( + model, self.timestep_map, self.original_num_steps + ) + + def _scale_timesteps(self, t): + # Scaling is done by the wrapped model. 
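+ # A worked sketch of the respacing above (hypothetical numbers): with num_timesteps=1000 and section_counts="ddim50", space_timesteps returns {0, 20, 40, ..., 980}. + # SpacedDiffusion keeps only those steps, rebuilding beta_j = 1 - alpha_cumprod[t_j] / alpha_cumprod[t_{j-1}] so the retained marginals match, while _WrappedModel (below) maps each compact index j back to the original timestep t_j before calling the network.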
+ return t + + +class _WrappedModel: + def __init__(self, model, timestep_map, original_num_steps): + self.model = model + self.timestep_map = timestep_map + # self.rescale_timesteps = rescale_timesteps + self.original_num_steps = original_num_steps + + def __call__(self, x, ts, **kwargs): + map_tensor = th.tensor(self.timestep_map, device=ts.device, dtype=ts.dtype) + new_ts = map_tensor[ts] + # if self.rescale_timesteps: + # new_ts = new_ts.float() * (1000.0 / self.original_num_steps) + return self.model(x, new_ts, **kwargs) diff --git a/models/llama_model.py b/models/llama_model.py new file mode 100644 index 0000000000000000000000000000000000000000..7c3e355cc3f1357ddc69698542ba24d7239402d2 --- /dev/null +++ b/models/llama_model.py @@ -0,0 +1,1590 @@ + +import math +from dataclasses import dataclass +import numpy as np +import torch +import torch.nn as nn +from torch.nn import functional as F +from typing_extensions import Self +from typing import Optional +from transformers.modeling_utils import PreTrainedModel +from torch.distributions import Categorical +from timm.layers.mlp import SwiGLU, Mlp + +@dataclass +class LLaMAHFConfig: + block_size: int = 78 + n_layer: int = 32 + n_head: int = 32 + n_embd: int = 4096 + T5_xxl_dim: int = 768 + + @classmethod + def from_name(cls, name: str) -> Self: + return cls(**llama_configs[name]) + + +llama_configs = { + "Normal_size": dict(n_layer=12, n_head=12, n_embd=768) +} + + +class LLaMAHF(nn.Module): + def __init__(self, config: LLaMAHFConfig, num_diffusion_head_layers=9, input_token_dim=16, device=torch.device('cuda'), width=1792) -> None: + super().__init__() + assert config.block_size is not None + self.config = config + + cond_dim = config.T5_xxl_dim + + self.transformer = nn.ModuleDict( + dict( + wte=nn.Linear(input_token_dim, config.n_embd), + cond_embed=nn.Linear(cond_dim, config.n_embd), + h=nn.ModuleList([Block(config) for _ in range(config.n_layer)]), + ln_f=RMSNorm(config.n_embd), + ) + ) + + target_channels = input_token_dim + from models.diffloss import DiffLoss + self.diff_loss = DiffLoss( + target_channels=target_channels, + z_channels=config.n_embd, + width=width, + depth=num_diffusion_head_layers, + num_sampling_steps='50', + grad_checkpointing=False, + ) + self.diff_loss = self.diff_loss.to(device) + self.out_proj = nn.Linear(config.n_embd, config.n_embd) + + + def _tie_or_clone_weights(self, output_embeddings, input_embeddings): + """Tie or clone module weights depending on whether we are using TorchScript or not""" + output_embeddings.weight = input_embeddings.weight + + if getattr(output_embeddings, "bias", None) is not None: + output_embeddings.bias.data = nn.functional.pad( + output_embeddings.bias.data, + ( + 0, + output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0], + ), + "constant", + 0, + ) + if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"): + output_embeddings.out_features = input_embeddings.num_embeddings + + def get_input_embeddings(self): + return self.transformer.wte + + def set_input_embeddings(self, value): + self.transformer.wte = value + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def _init_weights(self, module: nn.Module) -> None: + if isinstance(module, nn.Linear): + torch.nn.init.normal_(module.weight, mean=0.0, std=0.02 / math.sqrt(2 * self.config.n_layer)) + elif isinstance(module, nn.Embedding): + 
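+ # Same scaled initialization as the Linear branch above: shrinking the 0.02 std by sqrt(2 * n_layer), a GPT-2-style rule, keeps the residual stream's variance roughly constant as depth grows.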
torch.nn.init.normal_(module.weight, mean=0.0, std=0.02 / math.sqrt(2 * self.config.n_layer)) + + + + def forward_sample(self, idx: torch.Tensor, clip_feature: torch.Tensor, y_mask) -> torch.Tensor: + + text_length = clip_feature.shape[1] + if len(idx) == 0: + x = self.llama_proj(clip_feature)[:, :int(y_mask[0].sum()), :] + else: + _, t = idx.size() + assert ( + t <= self.config.block_size + ), f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}" + # forward the LLaMA model itself + x = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd) + x = torch.cat((self.llama_proj(clip_feature)[:, :int(y_mask[0].sum()), :],x), dim=1) + + for block in self.transformer.h: + x = block(x, y_mask) + x = self.transformer.ln_f(x) + logits = x + return logits + + + + def sample_for_eval_CFG(self, text, length=196, tokenize_model=None, device=torch.device('cuda'), unit_length=4, cfg=4.0): + max_token_len = length // unit_length + for k in range(max_token_len): + if k == 0: + x = [] + else: + x = xs + + feat_text = torch.from_numpy(tokenize_model.encode(text)).float() + feat_text = feat_text.to(device) + conditions = self.forward(x, feat_text) + conditions = conditions[:, -1, :] + + empty_text = '' + empty_feat_text = torch.from_numpy(tokenize_model.encode(empty_text)).float() + empty_feat_text = empty_feat_text.unsqueeze(0) + empty_feat_text = empty_feat_text.to(device) + empty_conditions = self.forward(x, empty_feat_text) + empty_conditions = empty_conditions[:, -1, :] + temperature = 1.0 + + # chunk + if cfg != 1: + mix_conditions = torch.cat([conditions, empty_conditions], dim=0) + sampled_token_latent = self.diff_loss.sample(mix_conditions, temperature=temperature, cfg=cfg) + scaled_logits, _ = sampled_token_latent.chunk(2, dim=0) + else: # no cfg + scaled_logits = self.diff_loss.sample(conditions, temperature=temperature, cfg=1) + + scaled_logits = scaled_logits.unsqueeze(0) + + if k == 0: + xs = scaled_logits + else: + xs = torch.cat((xs, scaled_logits), dim=1) + + return xs + + + + # Called at inference time; generation can stop early. + def sample_for_eval_CFG_inference(self, clip_text, if_categorial=False, length=312, clip_model=None, device=torch.device('cuda'), tokenizer='clip', unit_length=4, reference_end_token=None, threshold=3, cfg=4.5, temperature=1.0): + + import clip + max_token_len = length // unit_length + #print(f'Max_token_len: {max_token_len}') + + if tokenizer == 'clip': + text = clip.tokenize(clip_text, truncate=True).to(device) # len(cliptext)=32, text.shape=torch.Size([32, 77]) + + feat_clip_text = clip_model.encode_text(text).float() # feat_clip_text.shape=torch.Size([32, 512]) + elif tokenizer == 't5-xxl': + feat_clip_text = torch.from_numpy(clip_model.encode(clip_text)).float() # torch.Size([32, 768]) + #feat_clip_text = feat_clip_text.unsqueeze(0) + feat_clip_text = feat_clip_text.to(device) + + empty_clip_text = '' + if tokenizer == 'clip': + empty_text = clip.tokenize(empty_clip_text, truncate=True).to(device) + empty_feat_clip_text = clip_model.encode_text(empty_text).float() + elif tokenizer == 't5-xxl': + empty_feat_clip_text = torch.from_numpy(clip_model.encode(empty_clip_text)).float() # torch.Size([32, 768]) + empty_feat_clip_text = empty_feat_clip_text.unsqueeze(0) + empty_feat_clip_text = empty_feat_clip_text.to(device) + + for k in range(max_token_len): + if k == 0: + x = [] + else: + x = xs + + try: + conditions = self.forward(x, feat_clip_text) + except: + conditions = self.forward(x, feat_clip_text.unsqueeze(0)) + + + conditions = conditions[:, -1, :] + + 
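+ # Classifier-free guidance, sketched: the last hidden state is computed once under the real caption and once under an empty caption; both are stacked below and diff_loss.sample is assumed to blend them by the usual CFG rule, eps = eps_uncond + cfg * (eps_cond - eps_uncond).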
empty_conditions = self.forward(x, empty_feat_clip_text) + empty_conditions = empty_conditions[:, -1, :] + + mix_conditions = torch.cat([conditions, empty_conditions], dim=0) + sampled_token_latent = self.diff_loss.sample(mix_conditions, temperature=temperature, cfg=cfg) + + # chunk + if cfg != 1: + scaled_logits, _ = sampled_token_latent.chunk(2, dim=0) + else: + scaled_logits = sampled_token_latent + + scaled_logits = scaled_logits.unsqueeze(0) + + if reference_end_token is not None: + distance_l2 = torch.sqrt(torch.sum((scaled_logits - reference_end_token)**2)) + print(distance_l2) + if distance_l2 < threshold: + break + + if k == 0: + xs = scaled_logits + else: + xs = torch.cat((xs, scaled_logits), dim=1) + + return xs + def sample_for_eval_CFG_inference2(self, feat_clip_text, empty_feat_clip_text, if_categorial=False, length=312, clip_model=None, device=torch.device('cuda'), tokenizer='clip', unit_length=4, reference_end_token=None, threshold=3, cfg=4.5, temperature=1.0): + + import clip + max_token_len = length // unit_length + + for k in range(max_token_len): + if k == 0: + x = [] + else: + x = xs + + try: + conditions = self.forward(x, feat_clip_text) + except: + conditions = self.forward(x, feat_clip_text.unsqueeze(0)) + + + conditions = conditions[:, -1, :] + + + + empty_conditions = self.forward(x, empty_feat_clip_text) + empty_conditions = empty_conditions[:, -1, :] + + mix_conditions = torch.cat([conditions, empty_conditions], dim=0) + sampled_token_latent = self.diff_loss.sample(mix_conditions, temperature=temperature, cfg=cfg) + + # chunk + if cfg != 1: + scaled_logits, _ = sampled_token_latent.chunk(2, dim=0) + else: + scaled_logits = sampled_token_latent + + scaled_logits = scaled_logits.unsqueeze(0) + + if reference_end_token is not None: + distance_l2 = torch.sqrt(torch.sum((scaled_logits - reference_end_token)**2)) + print(distance_l2) + if distance_l2 < threshold: + break + + if k == 0: + xs = scaled_logits + else: + xs = torch.cat((xs, scaled_logits), dim=1) + + return xs + + def sample_for_eval_CFG_inference_next_one(self, current_token=[], feat_clip_text=None, empty_feat_clip_text=None, if_categorial=False, length=312, clip_model=None, device=torch.device('cuda'), tokenizer='clip', unit_length=4, reference_end_token=None, threshold=3, cfg=4.5, temperature=1.0): + + import clip + max_token_len = length // unit_length + + + for k in range(1): + + if current_token == []: + x = [] + else: + x = torch.cat(current_token, dim=1) + + + try: + conditions = self.forward(x, feat_clip_text) + except: + conditions = self.forward(x, feat_clip_text.unsqueeze(0)) + + + conditions = conditions[:, -1, :] + + + empty_conditions = self.forward(x, empty_feat_clip_text) + empty_conditions = empty_conditions[:, -1, :] + + mix_conditions = torch.cat([conditions, empty_conditions], dim=0) + sampled_token_latent = self.diff_loss.sample(mix_conditions, temperature=temperature, cfg=cfg) + + # chunk + if cfg != 1: + scaled_logits, _ = sampled_token_latent.chunk(2, dim=0) + else: + scaled_logits = sampled_token_latent + + + scaled_logits = scaled_logits.unsqueeze(0) + + + if k == 0: + xs = scaled_logits + else: + xs = torch.cat((xs, scaled_logits), dim=1) + + return xs + + + def sample_for_eval_CFG_babel(self, A_text, B_text, A_motion, if_categorial=False, length=6400, clip_model=None, device=torch.device('cuda'), tokenizer='clip', unit_length=4, reference_end_token=None, cfg=7.0, threshold=3): + + import clip + B_token_length = length // unit_length - A_motion.shape[0] + + if tokenizer == 
'clip': + A_text = clip.tokenize(A_text, truncate=True).to(device) + A_feat_clip_text = clip_model.encode_text(A_text).float() + B_text = clip.tokenize(B_text, truncate=True).to(device) + B_feat_clip_text = clip_model.encode_text(B_text).float() + elif tokenizer == 't5-xxl': + A_feat_clip_text = torch.from_numpy(clip_model.encode(A_text)).float() + A_feat_clip_text = A_feat_clip_text.to(device) + B_feat_clip_text = torch.from_numpy(clip_model.encode(B_text)).float() + B_feat_clip_text = B_feat_clip_text.to(device) + + A_text_embeddings = self.transformer.cond_embed(A_feat_clip_text).unsqueeze(0) + B_text_embeddings = self.transformer.cond_embed(B_feat_clip_text).unsqueeze(0) + + A_motion = A_motion.unsqueeze(0) + A_motion_embeddings = self.transformer.wte(A_motion) + B_motion = torch.tensor([]).to(device) + + for k in range(B_token_length): + if k == 0: + x = torch.cat([A_text_embeddings, A_motion_embeddings, B_text_embeddings], dim=1) + else: + x = xs + + + conditions = self.forward_babel_eval(x) + conditions = conditions[:, -1, :] + + empty_clip_text = '' + if tokenizer == 'clip': + empty_text = clip.tokenize(empty_clip_text, truncate=True).to(device) + empty_feat_clip_text = clip_model.encode_text(empty_text).float() + elif tokenizer == 't5-xxl': + empty_feat_clip_text = torch.from_numpy(clip_model.encode(empty_clip_text)).float() + empty_feat_clip_text = empty_feat_clip_text.unsqueeze(0) + empty_feat_clip_text = empty_feat_clip_text.to(device) + + empty_feat_clip_text_embedding = self.transformer.cond_embed(empty_feat_clip_text).unsqueeze(0) + + if k == 0: + empty_input = torch.cat([empty_feat_clip_text_embedding, A_motion_embeddings, empty_feat_clip_text_embedding], dim=1) + empty_conditions = self.forward_babel_eval(empty_input) + else: + B_motion_embeddings = self.transformer.wte(B_motion) + empty_input = torch.cat([empty_feat_clip_text_embedding, A_motion_embeddings, empty_feat_clip_text_embedding, B_motion_embeddings], dim=1) + empty_conditions = self.forward_babel_eval(empty_input) + + empty_conditions = empty_conditions[:, -1, :] + temperature = 1.0 + + mix_conditions = torch.cat([conditions, empty_conditions], dim=0) + sampled_token_latent = self.diff_loss.sample(mix_conditions, temperature=temperature, cfg=cfg) + + # chunk + if cfg != 1: + scaled_logits, _ = sampled_token_latent.chunk(2, dim=0) + else: + scaled_logits = sampled_token_latent + + + scaled_logits = scaled_logits.unsqueeze(0) + + + B_motion = torch.cat((B_motion, scaled_logits), dim=1) + + scaled_logits_embedding = self.transformer.wte(scaled_logits) + xs = torch.cat((x, scaled_logits_embedding), dim=1) + + + return xs, B_motion + + def sample_for_eval_CFG_babel_inference(self, A_text, B_text, A_motion, if_categorial=False, length=6400, clip_model=None, device=torch.device('cuda'), tokenizer='clip', unit_length=4, reference_end_token=None, cfg=7.0, threshold=3): + + import clip + B_token_length = length // unit_length - A_motion.shape[0] + + if tokenizer == 'clip': + A_text = clip.tokenize(A_text, truncate=True).to(device) + A_feat_clip_text = clip_model.encode_text(A_text).float() + B_text = clip.tokenize(B_text, truncate=True).to(device) + B_feat_clip_text = clip_model.encode_text(B_text).float() + elif tokenizer == 't5-xxl': + A_feat_clip_text = torch.from_numpy(clip_model.encode(A_text)).float() + A_feat_clip_text = A_feat_clip_text.to(device) + B_feat_clip_text = torch.from_numpy(clip_model.encode(B_text)).float() + B_feat_clip_text = B_feat_clip_text.to(device) + + A_text_embeddings = 
self.transformer.cond_embed(A_feat_clip_text).unsqueeze(0) + A_text_embeddings = A_text_embeddings.unsqueeze(0) + B_text_embeddings = self.transformer.cond_embed(B_feat_clip_text).unsqueeze(0) + B_text_embeddings = B_text_embeddings.unsqueeze(0) + + A_motion = A_motion.unsqueeze(0) + A_motion_embeddings = self.transformer.wte(A_motion) + B_motion = torch.tensor([]).to(device) + + attention_weights = [] + + for k in range(B_token_length): + if k == 0: + x = torch.cat([A_text_embeddings, A_motion_embeddings, B_text_embeddings], dim=1) + + else: + x = xs + + + + conditions = self.forward_babel_eval(x, return_attention=False) + conditions = conditions[:, -1, :] + + empty_clip_text = '' + if tokenizer == 'clip': + empty_text = clip.tokenize(empty_clip_text, truncate=True).to(device) + empty_feat_clip_text = clip_model.encode_text(empty_text).float() + elif tokenizer == 't5-xxl': + empty_feat_clip_text = torch.from_numpy(clip_model.encode(empty_clip_text)).float() + empty_feat_clip_text = empty_feat_clip_text.unsqueeze(0) + empty_feat_clip_text = empty_feat_clip_text.to(device) + + empty_feat_clip_text_embedding = self.transformer.cond_embed(empty_feat_clip_text).unsqueeze(0) + + if k == 0: + empty_input = torch.cat([empty_feat_clip_text_embedding, A_motion_embeddings, empty_feat_clip_text_embedding], dim=1) + empty_conditions = self.forward_babel_eval(empty_input) + else: + B_motion_embeddings = self.transformer.wte(B_motion) + empty_input = torch.cat([empty_feat_clip_text_embedding, A_motion_embeddings, empty_feat_clip_text_embedding, B_motion_embeddings], dim=1) + empty_conditions = self.forward_babel_eval(empty_input) + + empty_conditions = empty_conditions[:, -1, :] + temperature = 1.0 + + mix_conditions = torch.cat([conditions, empty_conditions], dim=0) + sampled_token_latent = self.diff_loss.sample(mix_conditions, temperature=temperature, cfg=cfg) + + # chunk + if cfg != 1: + scaled_logits, _ = sampled_token_latent.chunk(2, dim=0) + else: + scaled_logits = sampled_token_latent + + scaled_logits = scaled_logits.unsqueeze(0) + + if reference_end_token is not None: + distance_l2 = torch.sqrt(torch.sum((scaled_logits - reference_end_token)**2)) + print(distance_l2) + if distance_l2 < threshold: + break + + B_motion = torch.cat((B_motion, scaled_logits), dim=1) + + scaled_logits_embedding = self.transformer.wte(scaled_logits) + xs = torch.cat((x, scaled_logits_embedding), dim=1) + + + + return xs, B_motion + + + def sample_for_eval_CFG_babel_inference_new(self, B_text, A_motion, if_categorial=False, length=78, clip_model=None, device=torch.device('cuda'), tokenizer='clip', unit_length=4, reference_end_token=None, cfg=4.5, threshold=3): + + import clip + B_token_length = length // unit_length + + if tokenizer == 'clip': + # this function receives no A_text, so only the B-segment caption is encoded + B_text = clip.tokenize(B_text, truncate=True).to(device) + B_feat_clip_text = clip_model.encode_text(B_text).float() + elif tokenizer == 't5-xxl': + B_feat_clip_text = torch.from_numpy(clip_model.encode(B_text)).float() + B_feat_clip_text = B_feat_clip_text.to(device) + + empty_clip_text = '' + if tokenizer == 'clip': + empty_text = clip.tokenize(empty_clip_text, truncate=True).to(device) + empty_feat_clip_text = clip_model.encode_text(empty_text).float() + elif tokenizer == 't5-xxl': + empty_feat_clip_text = torch.from_numpy(clip_model.encode(empty_clip_text)).float() + empty_feat_clip_text = empty_feat_clip_text.unsqueeze(0) + empty_feat_clip_text = 
empty_feat_clip_text.to(device) + + B_text_embeddings = self.transformer.cond_embed(B_feat_clip_text).unsqueeze(0) + + A_motion = A_motion.unsqueeze(0) + A_motion_embeddings = self.transformer.wte(A_motion) + B_motion = torch.tensor([]).to(device) + + + attention_weights = [] + + for k in range(B_token_length): + if k == 0: + x = torch.cat([B_text_embeddings, A_motion_embeddings], dim=1) + else: + x = xs + + conditions = self.forward_babel_eval(x, return_attention=False) + conditions = conditions[:, -1, :] + + + empty_feat_clip_text_embedding = self.transformer.cond_embed(empty_feat_clip_text).unsqueeze(0) + + if k == 0: + empty_input = torch.cat([empty_feat_clip_text_embedding, A_motion_embeddings], dim=1) + + empty_conditions = self.forward_babel_eval(empty_input) + else: + B_motion_embeddings = self.transformer.wte(B_motion) + empty_input = torch.cat([empty_feat_clip_text_embedding, A_motion_embeddings, B_motion_embeddings], dim=1) + empty_conditions = self.forward_babel_eval(empty_input) + + empty_conditions = empty_conditions[:, -1, :] + temperature = 1.0 + + mix_conditions = torch.cat([conditions, empty_conditions], dim=0) + sampled_token_latent = self.diff_loss.sample(mix_conditions, temperature=temperature, cfg=cfg) + + # chunk + if cfg != 1: + scaled_logits, _ = sampled_token_latent.chunk(2, dim=0) + else: + scaled_logits = sampled_token_latent + + scaled_logits = scaled_logits.unsqueeze(0) + + if reference_end_token is not None: + distance_l2 = torch.sqrt(torch.sum((scaled_logits - reference_end_token)**2)) + print(distance_l2) + if distance_l2 < threshold: + break + + B_motion = torch.cat((B_motion, scaled_logits), dim=1) + + scaled_logits_embedding = self.transformer.wte(scaled_logits) + xs = torch.cat((x, scaled_logits_embedding), dim=1) + + + + return xs, B_motion + + + def sample_for_eval_CFG_babel_inference_new_demo(self, B_text, A_motion, if_categorial=False, length=312, clip_model=None, device=torch.device('cuda'), tokenizer='clip', unit_length=4, reference_end_token=None, cfg=4.5, threshold=3, temperature=1.0): + + import clip + B_token_length = length // unit_length - A_motion.shape[0] + + if tokenizer == 'clip': + # this function receives no A_text, so only the B-segment caption is encoded + B_text = clip.tokenize(B_text, truncate=True).to(device) + B_feat_clip_text = clip_model.encode_text(B_text).float() + elif tokenizer == 't5-xxl': + B_feat_clip_text = torch.from_numpy(clip_model.encode(B_text)).float() + B_feat_clip_text = B_feat_clip_text.to(device) + + empty_clip_text = '' + if tokenizer == 'clip': + empty_text = clip.tokenize(empty_clip_text, truncate=True).to(device) + empty_feat_clip_text = clip_model.encode_text(empty_text).float() + elif tokenizer == 't5-xxl': + empty_feat_clip_text = torch.from_numpy(clip_model.encode(empty_clip_text)).float() + empty_feat_clip_text = empty_feat_clip_text.unsqueeze(0) + empty_feat_clip_text = empty_feat_clip_text.to(device) + + B_text_embeddings = self.transformer.cond_embed(B_feat_clip_text).unsqueeze(0) + B_text_embeddings = B_text_embeddings.unsqueeze(0) + + A_motion = A_motion.unsqueeze(0) + A_motion_embeddings = self.transformer.wte(A_motion) + B_motion = torch.tensor([]).to(device) + + # store the attention weights of every layer + attention_weights = [] + + for k in range(B_token_length): + if k == 0: + x = torch.cat([B_text_embeddings, A_motion_embeddings], dim=1) + + else: + x = xs + + + conditions = self.forward_babel_eval(x, return_attention=False) + conditions = conditions[:, -1, :] + + + 
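+ # Early-stop sketch: each newly sampled latent is compared (L2) to reference_end_token further below; once the distance drops under threshold (and k > 10 in this demo variant), the segment ends without a fixed length being imposed.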
empty_feat_clip_text_embedding = self.transformer.cond_embed(empty_feat_clip_text).unsqueeze(0) + + if k == 0: + empty_input = torch.cat([empty_feat_clip_text_embedding, A_motion_embeddings], dim=1) + empty_conditions = self.forward_babel_eval(empty_input) + else: + B_motion_embeddings = self.transformer.wte(B_motion) + empty_input = torch.cat([empty_feat_clip_text_embedding, A_motion_embeddings, B_motion_embeddings], dim=1) + empty_conditions = self.forward_babel_eval(empty_input) + + empty_conditions = empty_conditions[:, -1, :] + + mix_conditions = torch.cat([conditions, empty_conditions], dim=0) + sampled_token_latent = self.diff_loss.sample(mix_conditions, temperature=temperature, cfg=cfg) + + # chunk + if cfg != 1: + scaled_logits, _ = sampled_token_latent.chunk(2, dim=0) + else: + scaled_logits = sampled_token_latent + + scaled_logits = scaled_logits.unsqueeze(0) + + if reference_end_token is not None: + distance_l2 = torch.sqrt(torch.sum((scaled_logits - reference_end_token)**2)) + print(distance_l2) + if distance_l2 < threshold and k > 10: + break + + B_motion = torch.cat((B_motion, scaled_logits), dim=1) + + scaled_logits_embedding = self.transformer.wte(scaled_logits) + xs = torch.cat((x, scaled_logits_embedding), dim=1) + + + + return xs, B_motion + + + + #--------------Test classification head-------------------- + def sample_for_eval_classification(self, clip_text, if_categorial=False, length=196, clip_model=None, device=torch.device('cuda'), tokenizer='clip', unit_length=4): + + import clip + + + for k in range(51): + if k == 0: + x = [] + else: + x = xs + + if tokenizer == 'clip': + text = clip.tokenize(clip_text, truncate=True).to(device) + + feat_clip_text = clip_model.encode_text(text).float() + elif tokenizer == 't5-xxl': + feat_clip_text = torch.from_numpy(clip_model.module.encode(clip_text)).float() + + conditions = self.forward(x, feat_clip_text) + conditions = conditions[:, -1, :] + + empty_clip_text = '' + if tokenizer == 'clip': + empty_text = clip.tokenize(empty_clip_text, truncate=True).to(device) + empty_feat_clip_text = clip_model.encode_text(empty_text).float() + elif tokenizer == 't5-xxl': + empty_feat_clip_text = torch.from_numpy(clip_model.module.encode(empty_clip_text)).float() + empty_feat_clip_text = empty_feat_clip_text.unsqueeze(0) + empty_feat_clip_text = empty_feat_clip_text.to(device) + + empty_conditions = self.forward(x, empty_feat_clip_text) + empty_conditions = empty_conditions[:, -1, :] + + temperature = 1.0 + cfg = 7.5 + + mix_conditions = torch.cat([conditions, empty_conditions], dim=0) + sampled_token_latent = self.diff_loss.sample(mix_conditions, temperature=temperature, cfg=cfg) + + # chunk + if cfg != 1: + scaled_logits, _ = sampled_token_latent.chunk(2, dim=0) + else: + scaled_logits = sampled_token_latent + + + prediction_logits = self.classify_head(conditions) + probs = torch.sigmoid(prediction_logits) + predicted_classes = torch.argmax(probs, dim=-1) + + + scaled_logits = scaled_logits.unsqueeze(0) + + if k == 0: + xs = scaled_logits + else: + xs = torch.cat((xs, scaled_logits), dim=1) + + if predicted_classes == 1: + break + + return xs + + + #--------------------Test CFG----------------------- + def sample_for_eval_CFG_test(self, clip_text, if_categorial=False, length=196, clip_model=None, cfg=1, device=torch.device('cuda'), tokenizer='clip', unit_length=4): + + import clip + max_token_len = length // unit_length + + + for k in range(max_token_len): + if k == 0: + x = [] + else: + x = xs + + + if cfg != 1: + if tokenizer == 'clip': 
+ text = clip.tokenize(clip_text, truncate=True).to(device) + + feat_clip_text = clip_model.encode_text(text).float() + elif tokenizer == 't5-xxl': + feat_clip_text = torch.from_numpy(clip_model.module.encode(clip_text)).float() + + conditions = self.forward(x, feat_clip_text) + + conditions = conditions[:, -1, :] + empty_clip_text = '' + if tokenizer == 'clip': + empty_text = clip.tokenize(empty_clip_text, truncate=True).to(device) + empty_feat_clip_text = clip_model.encode_text(empty_text).float() + elif tokenizer == 't5-xxl': + empty_feat_clip_text = torch.from_numpy(clip_model.module.encode(empty_clip_text)).float() + empty_feat_clip_text = empty_feat_clip_text.unsqueeze(0) + empty_feat_clip_text = empty_feat_clip_text.to(device) + + empty_conditions = self.forward(x, empty_feat_clip_text) + empty_conditions = empty_conditions[:, -1, :] + temperature = 1.0 + + + mix_conditions = torch.cat([conditions, empty_conditions], dim=0) + sampled_token_latent = self.diff_loss.sample(mix_conditions, temperature=temperature, cfg=cfg) + + # chunk + scaled_logits, _ = sampled_token_latent.chunk(2, dim=0) + + else: + if tokenizer == 'clip': + text = clip.tokenize(clip_text, truncate=True).to(device) + feat_clip_text = clip_model.encode_text(text).float() + elif tokenizer == 't5-xxl': + feat_clip_text = torch.from_numpy(clip_model.module.encode(clip_text)).float() + feat_clip_text = feat_clip_text.to(device) + + + conditions = self.forward(x, feat_clip_text) + + conditions = conditions[:, -1, :] + temperature = 1.0 + sampled_token_latent = self.diff_loss.sample(conditions, temperature=temperature, cfg=cfg) + scaled_logits = sampled_token_latent + + scaled_logits = scaled_logits.unsqueeze(0) + + if k == 0: + xs = scaled_logits + else: + xs = torch.cat((xs, scaled_logits), dim=1) + + return xs + #-------------------------------------------------- + + def forward_discrete(self, idx: torch.Tensor, clip_feature: torch.Tensor, use_cache=False, past_key_values=None) -> torch.Tensor: + if len(idx) == 0: + token_embeddings = self.transformer.cond_embed(clip_feature).unsqueeze(0) + + else: + b, t = idx.size() + #idx = idx.float() + assert ( + t <= self.config.block_size + ), f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}" + + # forward the LLaMA model itself + token_embeddings = self.transformer.wte(idx) + text_embeddings = self.transformer.cond_embed(clip_feature).unsqueeze(1) + token_embeddings = torch.cat([text_embeddings, token_embeddings], dim=1) + + x = token_embeddings + + # -------------------kv cache------------------- + #presents = () if use_cache else None + if use_cache: + if past_key_values is None: + past_key_values = [None] * len(self.transformer.h) + + + for i,block in enumerate(self.transformer.h): + if use_cache: + last_past = past_key_values[i] + x, presents = block(x, last_past, use_cache) + past_key_values[i] = list(presents) + else: + x = block(x) + x = self.transformer.ln_f(x) + + logits = self.lm_head(x) + + + return logits + + + def forward(self, idx: torch.Tensor, feature: torch.Tensor) -> torch.Tensor: + if len(idx) == 0: + token_embeddings = self.transformer.cond_embed(feature).unsqueeze(0) + + else: + b, t, c = idx.size() + idx = idx.float() + assert ( + t <= self.config.block_size + ), f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}" + + # forward the LLaMA model itself + token_embeddings = self.transformer.wte(idx) + text_embeddings = self.transformer.cond_embed(feature).unsqueeze(1) + token_embeddings = 
torch.cat([text_embeddings, token_embeddings], dim=1) + + x = token_embeddings + + for i,block in enumerate(self.transformer.h): + x = block(x) + x = self.transformer.ln_f(x) + logits = self.out_proj(x) + return logits + + + def babel_long(self, idx: torch.Tensor, clip_feature: torch.Tensor, use_cache=False, past_key_values=None, num_subseq=None, length=None) -> torch.Tensor: + + b, t, c = idx.size() + idx = idx.float() + idx = self.transformer.wte(idx) + assert ( + t <= self.config.block_size + ), f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}" + for i in range(b): + length_i = length[i][:num_subseq[i]] + clip_feature_i = clip_feature[i][:num_subseq[i]] + + pointer = 0 + for j in range(num_subseq[i]): + if j > 0: + pointer += length_i[j].item() + pointer += 1 + pointer = int(pointer) + + clip_feature_i_j = self.transformer.cond_embed(clip_feature_i[j].unsqueeze(0)).unsqueeze(1) + idx[i] = torch.cat([idx[i][:pointer].unsqueeze(0), clip_feature_i_j, idx[i][pointer:-1].unsqueeze(0)], dim=1)[0] + + x = idx + + + if use_cache: + if past_key_values is None: + past_key_values = [None] * len(self.transformer.h) + + + for i,block in enumerate(self.transformer.h): + if use_cache: + last_past = past_key_values[i] + x, presents = block(x, last_past, use_cache) + past_key_values[i] = list(presents) + else: + x = block(x) + x = self.transformer.ln_f(x) + + logits = self.out_proj(x) + return logits + + + def forward_babel_eval(self, x, return_attention=False) -> torch.Tensor: + layer_attentions = [] + for block in self.transformer.h: + if return_attention: + x, att = block(x, return_attention=True) + layer_attentions.append(att) + else: + x = block(x) + + x = self.transformer.ln_f(x) + if self.use_out_proj: + logits = self.out_proj(x) + else: + logits = x + + if return_attention: + return logits, layer_attentions + return logits + + def forward_babel(self, idx: torch.Tensor, clip_feature: torch.Tensor, A_token_length) -> torch.Tensor: + if len(idx) == 0: # inference + token_embeddings = self.transformer.cond_embed(clip_feature).unsqueeze(1) + + else: + b, t, c = idx.size() + idx = idx.float() + assert ( + t <= self.config.block_size + ), f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}" + + + + A_feature = clip_feature[:, 0, :] + B_feature = clip_feature[:, 1, :] + + + A_text_embeddings = self.transformer.cond_embed(A_feature).unsqueeze(1) + B_text_embeddings = self.transformer.cond_embed(B_feature).unsqueeze(1) + + token_embeddings = torch.zeros(b, self.config.block_size, self.config.n_embd).to(idx.device) + for i in range(b): + A_idx = idx[i, :A_token_length[i].item(), :] + B_idx = idx[i, A_token_length[i].item():-2, :] + token_embeddings[i, :, :] = torch.cat([A_text_embeddings[i], self.BOM_tag, self.transformer.wte(A_idx), B_text_embeddings[i], self.BOM_tag, self.transformer.wte(B_idx)], dim=0) #token_embeddings.shape = (b,t+1,1024) + + x = token_embeddings + for block in self.transformer.h: + x = block(x) + x = self.transformer.ln_f(x) + + if self.use_out_proj: + logits = self.out_proj(x) + else: + logits = x + + + return logits + + def forward_babel2(self, idx: torch.Tensor, clip_feature: torch.Tensor) -> torch.Tensor: + if len(idx) == 0: # inference + token_embeddings = self.transformer.cond_embed(clip_feature).unsqueeze(1) + + else: + b, t, c = idx.size() + idx = idx.float() + assert ( + t <= self.config.block_size + ), f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}" + + B_feature = 
clip_feature + B_text_embeddings = self.transformer.cond_embed(B_feature) + + idx_embeddings = self.transformer.wte(idx) + + + token_embeddings = torch.cat([B_text_embeddings, idx_embeddings], dim=1) + + + x = token_embeddings + for block in self.transformer.h: + x = block(x) + x = self.transformer.ln_f(x) + + if self.use_out_proj: + logits = self.out_proj(x) + else: + logits = x + + return logits + + + def resize_token_embeddings( + self, new_num_tokens: Optional[int] = None, pad_to_multiple_of: Optional[int] = None, using_old_initilization: bool = False + ) -> nn.Embedding: + """ + Resizes input token embeddings matrix of the model if `new_num_tokens != config.vocab_size`. + + Takes care of tying weights embeddings afterwards if the model class has a `tie_weights()` method. + + Arguments: + new_num_tokens (`int`, *optional*): + The new number of tokens in the embedding matrix. Increasing the size will add newly initialized + vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just + returns a pointer to the input tokens `torch.nn.Embedding` module of the model without doing anything. + pad_to_multiple_of (`int`, *optional*): + If set will pad the embedding matrix to a multiple of the provided value. If `new_num_tokens` is set to + `None` will just pad the embedding to a multiple of `pad_to_multiple_of`. + + This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability + `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. For more + details about this, or help on choosing the correct value for resizing, refer to this guide: + https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc + + Return: + `torch.nn.Embedding`: Pointer to the input tokens Embeddings Module of the model. 
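+ Example (hypothetical sizes): calling `model.resize_token_embeddings(32005, pad_to_multiple_of=64)` rounds the request up to 32064 rows; existing rows are copied over, and only the new tail rows keep their fresh `_init_weights` initialization.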
+ """ + model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of) + if new_num_tokens is None and pad_to_multiple_of is None: + return model_embeds + + # Update base model and current model config + self.config.vocab_size = model_embeds.weight.shape[0] + self.vocab_size = model_embeds.weight.shape[0] + + # Tie weights again if needed + # self.tie_weights() + + return model_embeds + + def _resize_token_embeddings(self, new_num_tokens, pad_to_multiple_of=None): + old_embeddings = self.get_input_embeddings() + new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens, pad_to_multiple_of) + old_embeddings_requires_grad = old_embeddings.weight.requires_grad + new_embeddings.requires_grad_(old_embeddings_requires_grad) + self.set_input_embeddings(new_embeddings) + + # Update new_num_tokens with the actual size of new_embeddings + if pad_to_multiple_of is not None: + # if is_deepspeed_zero3_enabled(): + # import deepspeed + + # with deepspeed.zero.GatheredParameters(new_embeddings.weight, modifier_rank=None): + # new_num_tokens = new_embeddings.weight.shape[0] + # else: + new_num_tokens = new_embeddings.weight.shape[0] + + # if word embeddings are not tied, make sure that lm head is resized as well + # if self.get_output_embeddings() is not None and not self.config.tie_word_embeddings: + if self.get_output_embeddings() is not None and not False: + old_lm_head = self.get_output_embeddings() + new_lm_head = self._get_resized_lm_head(old_lm_head, new_num_tokens) + # if hasattr(old_lm_head, "_hf_hook"): + # hook = old_lm_head._hf_hook + # add_hook_to_module(new_lm_head, hook) + old_lm_head_requires_grad = old_lm_head.weight.requires_grad + new_lm_head.requires_grad_(old_lm_head_requires_grad) + self.set_output_embeddings(new_lm_head) + + return self.get_input_embeddings() + + def _get_resized_embeddings( + self, + old_embeddings: nn.Embedding, + new_num_tokens: Optional[int] = None, + pad_to_multiple_of: Optional[int] = None, + ) -> nn.Embedding: + """ + Build a resized Embedding Module from a provided token Embedding Module. Increasing the size will add newly + initialized vectors at the end. Reducing the size will remove vectors from the end + + Args: + old_embeddings (`torch.nn.Embedding`): + Old embeddings to be resized. + new_num_tokens (`int`, *optional*): + New number of tokens in the embedding matrix. + + Increasing the size will add newly initialized vectors at the end. Reducing the size will remove + vectors from the end. If not provided or `None`, just returns a pointer to the input tokens + `torch.nn.Embedding` module of the model without doing anything. + pad_to_multiple_of (`int`, *optional*): + If set will pad the embedding matrix to a multiple of the provided value. If `new_num_tokens` is set to + `None` will just pad the embedding to a multiple of `pad_to_multiple_of`. + + This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability + `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. 
For more + details about this, or help on choosing the correct value for resizing, refer to this guide: + https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc + + + Return: + `torch.nn.Embedding`: Pointer to the resized Embedding Module or the old Embedding Module if + `new_num_tokens` is `None` + """ + + if pad_to_multiple_of is not None: + if not isinstance(pad_to_multiple_of, int): + raise ValueError( + f"Asking to pad the embedding matrix to a multiple of `{pad_to_multiple_of}`, which is not an integer. Please make sure to pass an integer" + ) + if new_num_tokens is None: + new_num_tokens = old_embeddings.weight.shape[0] + new_num_tokens = ((new_num_tokens + pad_to_multiple_of - 1) // pad_to_multiple_of) * pad_to_multiple_of + else: + print( + "You are resizing the embedding layer without providing a `pad_to_multiple_of` parameter. This means that the new embedding" + f" dimension will be {new_num_tokens}. This might induce some performance reduction as *Tensor Cores* will not be available." + " For more details about this, or help on choosing the correct value for resizing, refer to this guide:" + " https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc" + ) + + if new_num_tokens is None: + return old_embeddings + + # if is_deepspeed_zero3_enabled(): + if False: + import deepspeed + + with deepspeed.zero.GatheredParameters(old_embeddings.weight, modifier_rank=None): + old_num_tokens, old_embedding_dim = old_embeddings.weight.size() + else: + old_num_tokens, old_embedding_dim = old_embeddings.weight.size() + + # if old_num_tokens == new_num_tokens and not is_deepspeed_zero3_enabled(): + if old_num_tokens == new_num_tokens and not False: + return old_embeddings + + if not isinstance(old_embeddings, nn.Embedding): + raise TypeError( + f"Old embeddings are of type {type(old_embeddings)}, which is not an instance of {nn.Embedding}. You" + " should either use a different resize function or make sure that `old_embeddings` are an instance of" + f" {nn.Embedding}." + ) + + # Build new embeddings + + # When using DeepSpeed ZeRO-3, we shouldn't create new embeddings with DeepSpeed init + # because the shape of the new embedding layer is used across various modeling files + # as well as to update config vocab size. Shape will be 0 when using DeepSpeed init leading + # to errors when training. + new_embeddings = nn.Embedding( + new_num_tokens, + old_embedding_dim, + device=old_embeddings.weight.device, + dtype=old_embeddings.weight.dtype, + ) + + # initialize all new embeddings (in particular added tokens) + self._init_weights(new_embeddings) + + # Copy token embeddings from the previous weights + + # number of tokens to copy + n = min(old_num_tokens, new_num_tokens) + + # if is_deepspeed_zero3_enabled(): + if False: + import deepspeed + + params = [old_embeddings.weight, new_embeddings.weight] + with deepspeed.zero.GatheredParameters(params, modifier_rank=0): + new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :] + else: + new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :] + + return new_embeddings + + + def _get_resized_lm_head( + self, old_lm_head: nn.Linear, new_num_tokens: Optional[int] = None, transposed: Optional[bool] = False + ) -> nn.Linear: + """ + Build a resized Linear Module from a provided old Linear Module. Increasing the size will add newly initialized + vectors at the end. 
Reducing the size will remove vectors from the end + + Args: + old_lm_head (`torch.nn.Linear`): + Old lm head linear layer to be resized. + new_num_tokens (`int`, *optional*): + New number of tokens in the linear matrix. + + Increasing the size will add newly initialized vectors at the end. Reducing the size will remove + vectors from the end. If not provided or `None`, just returns a pointer to the old + `torch.nn.Linear` module of the model without doing anything. + transposed (`bool`, *optional*, defaults to `False`): + Whether `old_lm_head` is transposed or not. If True `old_lm_head.size()` is `lm_head_dim, + vocab_size` else `vocab_size, lm_head_dim`. + + Return: + `torch.nn.Linear`: Pointer to the resized Linear Module or the old Linear Module if `new_num_tokens` is + `None` + """ + if new_num_tokens is None: + return old_lm_head + + # if is_deepspeed_zero3_enabled(): + if False: + import deepspeed + + with deepspeed.zero.GatheredParameters(old_lm_head.weight, modifier_rank=None): + old_num_tokens, old_lm_head_dim = ( + old_lm_head.weight.size() if not transposed else old_lm_head.weight.t().size() + ) + else: + old_num_tokens, old_lm_head_dim = ( + old_lm_head.weight.size() if not transposed else old_lm_head.weight.t().size() + ) + + # if old_num_tokens == new_num_tokens and not is_deepspeed_zero3_enabled(): + if old_num_tokens == new_num_tokens and not False: + return old_lm_head + + if not isinstance(old_lm_head, nn.Linear): + raise TypeError( + f"Old language model head is of type {type(old_lm_head)}, which is not an instance of {nn.Linear}. You" + " should either use a different resize function or make sure that `old_lm_head` are an instance of" + f" {nn.Linear}." + ) + + # Build new lm head + new_lm_head_shape = (old_lm_head_dim, new_num_tokens) if not transposed else (new_num_tokens, old_lm_head_dim) + has_new_lm_head_bias = old_lm_head.bias is not None + + # When using DeepSpeed ZeRO-3, we shouldn't create new embeddings with DeepSpeed init + # because the shape of the new embedding layer is used across various modeling files + # as well as to update config vocab size. Shape will be 0 when using DeepSpeed init leading + # to errors when training. 
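+ # Note: new_lm_head_shape is unpacked positionally below, so when not transposed this builds nn.Linear(old_lm_head_dim, new_num_tokens), i.e. the vocabulary size becomes the head's out_features.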
+ new_lm_head = nn.Linear( + *new_lm_head_shape, + bias=has_new_lm_head_bias, + device=old_lm_head.weight.device, + dtype=old_lm_head.weight.dtype, + ) + + # initialize new lm head (in particular added tokens) + self._init_weights(new_lm_head) + + num_tokens_to_copy = min(old_num_tokens, new_num_tokens) + + # if is_deepspeed_zero3_enabled(): + if False: + import deepspeed + + params = [old_lm_head.weight, old_lm_head.bias, new_lm_head.weight, new_lm_head.bias] + with deepspeed.zero.GatheredParameters(params, modifier_rank=0): + self._copy_lm_head_original_to_resized( + new_lm_head, old_lm_head, num_tokens_to_copy, transposed, has_new_lm_head_bias + ) + else: + self._copy_lm_head_original_to_resized( + new_lm_head, old_lm_head, num_tokens_to_copy, transposed, has_new_lm_head_bias + ) + + return new_lm_head + + def _copy_lm_head_original_to_resized( + self, new_lm_head, old_lm_head, num_tokens_to_copy, transposed, has_new_lm_head_bias + ): + # Copy old lm head weights to new lm head + if not transposed: + new_lm_head.weight.data[:num_tokens_to_copy, :] = old_lm_head.weight.data[:num_tokens_to_copy, :] + else: + new_lm_head.weight.data[:, :num_tokens_to_copy] = old_lm_head.weight.data[:, :num_tokens_to_copy] + + # Copy bias weights to new lm head + if has_new_lm_head_bias: + new_lm_head.bias.data[:num_tokens_to_copy] = old_lm_head.bias.data[:num_tokens_to_copy] + + @classmethod + def from_name(cls, name: str) -> Self: + return cls(LLaMAHFConfig.from_name(name)) + + +class Block(nn.Module): + def __init__(self, config: LLaMAHFConfig) -> None: + super().__init__() + self.rms_1 = RMSNorm(config.n_embd) + + # sentence level: + self.attn = CausalSelfAttention(config) + self.rms_2 = RMSNorm(config.n_embd) + self.mlp = MLP(config) + + def forward(self, x: torch.Tensor, last_past=None, use_cache=False, return_attention=False) -> torch.Tensor: + if use_cache: + if return_attention: + a, attn = self.attn.forward_attn(self.rms_1(x), last_past, use_cache) + else: + a, present = self.attn(self.rms_1(x), last_past, use_cache) + x = x + a + else: + if return_attention: + a, attn = self.attn.forward_attn(self.rms_1(x)) + else: + a = self.attn(self.rms_1(x)) + x = x + a + x = x + self.mlp(self.rms_2(x)) + + if use_cache: + if return_attention: + return x, present, attn + else: + return x, present + else: + if return_attention: + return x, attn + else: + return x + + +class CausalSelfAttention(nn.Module): + def __init__(self, config: LLaMAHFConfig) -> None: + super().__init__() + assert config.n_embd % config.n_head == 0 + + # key, query, value projections for all heads, but in a batch + self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=False) + # output projection + self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=False) + + self.n_head = config.n_head + self.n_embd = config.n_embd + self.block_size = config.block_size + self.rope_cache = None + + def scaling_factor(sequence_threshold): + return np.log2((sequence_threshold**2) - sequence_threshold) + scale_init = scaling_factor(self.block_size) + self.scale = nn.Parameter(torch.tensor(scale_init)) + + def forward(self, x: torch.Tensor, last_past=None, use_cache=False) -> torch.Tensor: + B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd) + + # calculate query, key, values for all heads in batch and move head forward to be the batch dim + q, k, v = self.c_attn(x).split(self.n_embd, dim=2) + + head_size = C // self.n_head + k = k.view(B, T, self.n_head, head_size).transpose(1, 2) # (B, nh, T, hs) + q = 
q.view(B, T, self.n_head, head_size).transpose(1, 2) # (B, nh, T, hs) + v = v.view(B, T, self.n_head, head_size).transpose(1, 2) # (B, nh, T, hs) + + # kv_cache + if use_cache: + if last_past is not None: + past_key, past_value = last_past + k = torch.cat([past_key, k], dim=-2) + v = torch.cat([past_value, v], dim=-2) + # else: + # key_states = k + # value_states = v + + if use_cache: + present = (k, v) + else: + present = None + + # QK-Norm + q = F.normalize(q, p=2, dim=-1) + k = F.normalize(k, p=2, dim=-1) + + if self.rope_cache is None: + # cache for future forward calls + self.rope_cache = build_rope_cache( + seq_len=self.block_size, + n_elem=self.n_embd // self.n_head, + dtype=x.dtype, + device=x.device, + ) + + + q = apply_rope(q, self.rope_cache) + k = apply_rope(k, self.rope_cache) + + + + # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T) + # att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1))) + # att = att.masked_fill(self.bias[:,:,:T,:T] == 0, float('-inf')) + # att = F.softmax(att, dim=-1) + # y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs) + + # efficient attention using Flash Attention CUDA kernels + y = F.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=True, scale=self.scale.item()) + + y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side + + # output projection + y = self.c_proj(y) + + + if use_cache: + return y, present + return y + + def forward_attn(self, x: torch.Tensor, last_past=None, use_cache=False) -> torch.Tensor: + B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd) + + # calculate query, key, values for all heads in batch and move head forward to be the batch dim + q, k, v = self.c_attn(x).split(self.n_embd, dim=2) + + head_size = C // self.n_head + k = k.view(B, T, self.n_head, head_size).transpose(1, 2) # (B, nh, T, hs) + q = q.view(B, T, self.n_head, head_size).transpose(1, 2) # (B, nh, T, hs) + v = v.view(B, T, self.n_head, head_size).transpose(1, 2) # (B, nh, T, hs) + + # kv_cache + if use_cache: + if last_past is not None: + past_key, past_value = last_past + k = torch.cat([past_key, k], dim=-2) + v = torch.cat([past_value, v], dim=-2) + # else: + # key_states = k + # value_states = v + + if use_cache: + present = (k, v) + else: + present = None + + # QK-Norm + q = F.normalize(q, p=2, dim=-1) + k = F.normalize(k, p=2, dim=-1) + + if self.rope_cache is None: + # cache for future forward calls + self.rope_cache = build_rope_cache( + seq_len=self.block_size, + n_elem=self.n_embd // self.n_head, + dtype=x.dtype, + device=x.device, + ) + + + q = apply_rope(q, self.rope_cache) + k = apply_rope(k, self.rope_cache) + + + att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1))) + att = F.softmax(att, dim=-1) # [B, n_head, T, T] + + # efficient attention using Flash Attention CUDA kernels + y = F.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=True) + y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side + + # output projection + y = self.c_proj(y) + + return y, att + +class LengthCausalSelfAttention(nn.Module): + def __init__(self, config: LLaMAHFConfig) -> None: + super().__init__() + assert config.n_embd % config.n_head == 0 + + # key, query, value projections for all heads, but in a batch + self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=False) + # output projection + self.c_proj = nn.Linear(config.n_embd, 
config.n_embd, bias=False) + + self.n_head = config.n_head + self.n_embd = config.n_embd + self.block_size = config.block_size + self.rope_cache = None + + def forward(self, x: torch.Tensor, y_mask: torch.Tensor) -> torch.Tensor: + B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd) + + # calculate query, key, values for all heads in batch and move head forward to be the batch dim + q, k, v = self.c_attn(x).split(self.n_embd, dim=2) + + head_size = C // self.n_head + k = k.view(B, T, self.n_head, head_size).transpose(1, 2) # (B, nh, T, hs) + q = q.view(B, T, self.n_head, head_size).transpose(1, 2) # (B, nh, T, hs) + v = v.view(B, T, self.n_head, head_size).transpose(1, 2) # (B, nh, T, hs) + + if self.rope_cache is None: + # cache for future forward calls + self.rope_cache = build_rope_cache( + seq_len=self.block_size, + n_elem=self.n_embd // self.n_head, + dtype=x.dtype, + device=x.device, + ) + + + # example shapes: q: (1, 16, 40, 64) + # q: (128, 16, 106, 64) + q = apply_rope(q, self.rope_cache) + k = apply_rope(k, self.rope_cache) + + attn_mask = torch.ones(T, T, dtype=torch.bool, device=x.device) + attn_mask = torch.tril(attn_mask) + attn_mask = attn_mask.unsqueeze(0).expand(B, -1, -1) + + text_mask = y_mask.unsqueeze(2)*y_mask.unsqueeze(1) + text_mask = F.pad(text_mask, (0, T-y_mask.shape[1], 0, T-y_mask.shape[1]), mode='constant', value=0) + attn_mask = torch.logical_or(attn_mask, text_mask) + + y = F.scaled_dot_product_attention(q, k, v, attn_mask=attn_mask.unsqueeze(1), dropout_p=0.0, is_causal=False) + + y = y.transpose(1, 2).contiguous().view(B, T, C) + + + y = self.c_proj(y) + + return y + + +class MLP(nn.Module): + def __init__(self, config: LLaMAHFConfig) -> None: + super().__init__() + hidden_dim = 4 * config.n_embd + n_hidden = int(2 * hidden_dim / 3) + N = 256 + # ensure n_hidden is multiple of N + n_hidden = ((n_hidden - 1) // N) * N + N + + self.c_fc1 = nn.Linear(config.n_embd, n_hidden, bias=False) + self.c_fc2 = nn.Linear(config.n_embd, n_hidden, bias=False) + self.c_proj = nn.Linear(n_hidden, config.n_embd, bias=False) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """SwiGLU feed-forward: SiLU-gated linear unit, as used in LLaMA.""" + x = F.silu(self.c_fc1(x)) * self.c_fc2(x) + x = self.c_proj(x) + return x + + +class RMSNorm(nn.Module): + """Root Mean Square Layer Normalization. + + Derived from https://github.com/bzhangGo/rmsnorm/blob/master/rmsnorm_torch.py. BSD 3-Clause License: + https://github.com/bzhangGo/rmsnorm/blob/master/LICENSE. + """ + + def __init__(self, size: int, dim: int = -1, eps: float = 1e-5) -> None: + super().__init__() + self.scale = nn.Parameter(torch.ones(size)) + self.eps = eps + self.dim = dim + + def forward(self, x: torch.Tensor) -> torch.Tensor: + # NOTE: the original RMSNorm paper implementation is not equivalent + # norm_x = x.norm(2, dim=self.dim, keepdim=True) + # rms_x = norm_x * d_x ** (-1. / 2) + # x_normed = x / (rms_x + self.eps) + norm_x = torch.mean(x * x, dim=self.dim, keepdim=True) + x_normed = x * torch.rsqrt(norm_x + self.eps) + return self.scale * x_normed + + +def build_rope_cache(seq_len: int, n_elem: int, dtype: torch.dtype, device: torch.device, base: int = 10000) -> torch.Tensor: + """Enhanced Transformer with Rotary Position Embedding. + + Derived from: https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/ + transformers/rope/__init__.py. MIT License: + https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/license. 
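+ In short: for head size d, frequencies theta_i = base^(-2i/d) are paired with every position p to cache the complex rotations exp(1j * p * theta_i); apply_rope later multiplies (q, k), viewed as complex pairs, by these rotations.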
+ """ + # $\Theta = {\theta_i = 10000^{\frac{2(i-1)}{d}}, i \in [1, 2, ..., \frac{d}{2}]}$ + theta = 1.0 / (base ** (torch.arange(0, n_elem, 2, dtype=dtype, device=device) / n_elem)) + + # Create position indexes `[0, 1, ..., seq_len - 1]` + seq_idx = torch.arange(seq_len, dtype=dtype, device=device) + + # Calculate the product of position index and $\theta_i$ + idx_theta = torch.outer(seq_idx, theta) + + # Compute cache. Because polar only takes float32 or float64, we need to cast + # when working with 16 bit floats (float16 or bfloat16) + dtypes_requiring_casting = [torch.float16, torch.bfloat16, torch.int8] + working_dtype = ( + torch.float32 if dtype in dtypes_requiring_casting else dtype + ) + complex_dtype = ( + torch.complex32 if dtype in dtypes_requiring_casting else torch.complex64 + ) + cache = torch.polar( + torch.ones_like(idx_theta).to(working_dtype), idx_theta.to(working_dtype) + ).to(complex_dtype) + return cache + + +def apply_rope(x: torch.Tensor, rope_cache: torch.Tensor) -> torch.Tensor: + x = x.transpose(1, 2) + + # truncate to support variable sizes + T = x.size(1) + rope_cache = rope_cache[:T] + # cast because `view_as_complex` does not support 16 bit tensors + xc = torch.view_as_complex(x.float().reshape(*x.shape[:-1], -1, 2)) + rope_cache = rope_cache.view(1, xc.size(1), 1, xc.size(3)) + x_out = torch.view_as_real(xc * rope_cache).flatten(3) + return x_out.transpose(1, 2).type_as(x) diff --git a/models/resnet.py b/models/resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..7fac134054d07728b75a2ddd603bd96c0c47343c --- /dev/null +++ b/models/resnet.py @@ -0,0 +1,160 @@ +import torch.nn as nn +import torch + +class nonlinearity(nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x): + # swish + return x * torch.sigmoid(x) + +class ResConv1DBlock(nn.Module): + def __init__(self, n_in, n_state, dilation=1, activation='silu', norm=None, dropout=None): + super().__init__() + padding = dilation + self.norm = norm + if norm == "LN": + self.norm1 = nn.LayerNorm(n_in) + self.norm2 = nn.LayerNorm(n_in) + elif norm == "GN": + self.norm1 = nn.GroupNorm(num_groups=32, num_channels=n_in, eps=1e-6, affine=True) + self.norm2 = nn.GroupNorm(num_groups=32, num_channels=n_in, eps=1e-6, affine=True) + elif norm == "BN": + self.norm1 = nn.BatchNorm1d(num_features=n_in, eps=1e-6, affine=True) + self.norm2 = nn.BatchNorm1d(num_features=n_in, eps=1e-6, affine=True) + + else: + self.norm1 = nn.Identity() + self.norm2 = nn.Identity() + + if activation == "relu": + self.activation1 = nn.ReLU() + self.activation2 = nn.ReLU() + + elif activation == "silu": + self.activation1 = nonlinearity() + self.activation2 = nonlinearity() + + elif activation == "gelu": + self.activation1 = nn.GELU() + self.activation2 = nn.GELU() + + + self.conv1 = nn.Conv1d(n_in, n_state, 3, 1, padding, dilation) + self.conv2 = nn.Conv1d(n_state, n_in, 1, 1, 0,) + + + def forward(self, x): + x_orig = x + if self.norm == "LN": + x = self.norm1(x.transpose(-2, -1)) + x = self.activation1(x.transpose(-2, -1)) + else: + x = self.norm1(x) + x = self.activation1(x) + + x = self.conv1(x) + + if self.norm == "LN": + x = self.norm2(x.transpose(-2, -1)) + x = self.activation2(x.transpose(-2, -1)) + else: + x = self.norm2(x) + x = self.activation2(x) + + x = self.conv2(x) + x = x + x_orig + return x + +class Resnet1D(nn.Module): + def __init__(self, n_in, n_depth, dilation_growth_rate=1, reverse_dilation=True, activation='relu', norm=None): + super().__init__() + + blocks = 
diff --git a/models/resnet.py b/models/resnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..7fac134054d07728b75a2ddd603bd96c0c47343c
--- /dev/null
+++ b/models/resnet.py
@@ -0,0 +1,160 @@
+import torch.nn as nn
+import torch
+
+class nonlinearity(nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, x):
+        # swish
+        return x * torch.sigmoid(x)
+
+class ResConv1DBlock(nn.Module):
+    def __init__(self, n_in, n_state, dilation=1, activation='silu', norm=None, dropout=None):
+        super().__init__()
+        padding = dilation
+        self.norm = norm
+        if norm == "LN":
+            self.norm1 = nn.LayerNorm(n_in)
+            self.norm2 = nn.LayerNorm(n_in)
+        elif norm == "GN":
+            self.norm1 = nn.GroupNorm(num_groups=32, num_channels=n_in, eps=1e-6, affine=True)
+            self.norm2 = nn.GroupNorm(num_groups=32, num_channels=n_in, eps=1e-6, affine=True)
+        elif norm == "BN":
+            self.norm1 = nn.BatchNorm1d(num_features=n_in, eps=1e-6, affine=True)
+            self.norm2 = nn.BatchNorm1d(num_features=n_in, eps=1e-6, affine=True)
+        else:
+            self.norm1 = nn.Identity()
+            self.norm2 = nn.Identity()
+
+        if activation == "relu":
+            self.activation1 = nn.ReLU()
+            self.activation2 = nn.ReLU()
+        elif activation == "silu":
+            self.activation1 = nonlinearity()
+            self.activation2 = nonlinearity()
+        elif activation == "gelu":
+            self.activation1 = nn.GELU()
+            self.activation2 = nn.GELU()
+
+        self.conv1 = nn.Conv1d(n_in, n_state, 3, 1, padding, dilation)
+        self.conv2 = nn.Conv1d(n_state, n_in, 1, 1, 0)
+
+    def forward(self, x):
+        x_orig = x
+        if self.norm == "LN":
+            x = self.norm1(x.transpose(-2, -1))
+            x = self.activation1(x.transpose(-2, -1))
+        else:
+            x = self.norm1(x)
+            x = self.activation1(x)
+
+        x = self.conv1(x)
+
+        if self.norm == "LN":
+            x = self.norm2(x.transpose(-2, -1))
+            x = self.activation2(x.transpose(-2, -1))
+        else:
+            x = self.norm2(x)
+            x = self.activation2(x)
+
+        x = self.conv2(x)
+        x = x + x_orig
+        return x
+
+class Resnet1D(nn.Module):
+    def __init__(self, n_in, n_depth, dilation_growth_rate=1, reverse_dilation=True, activation='relu', norm=None):
+        super().__init__()
+
+        blocks = [ResConv1DBlock(n_in, n_in, dilation=dilation_growth_rate ** depth, activation=activation, norm=norm) for depth in range(n_depth)]
+        if reverse_dilation:
+            blocks = blocks[::-1]
+
+        self.model = nn.Sequential(*blocks)
+
+    def forward(self, x):
+        return self.model(x)
+
+
+class CausalResConv1DBlock(nn.Module):
+    def __init__(self, n_in, n_state, dilation=1, activation='silu', norm=None, dropout=None):
+        super().__init__()
+        self.norm = norm
+        if norm == "LN":
+            self.norm1 = nn.LayerNorm(n_in)
+            self.norm2 = nn.LayerNorm(n_in)
+        elif norm == "GN":
+            self.norm1 = nn.GroupNorm(num_groups=32, num_channels=n_in, eps=1e-6, affine=True)
+            self.norm2 = nn.GroupNorm(num_groups=32, num_channels=n_in, eps=1e-6, affine=True)
+        elif norm == "BN":
+            self.norm1 = nn.BatchNorm1d(num_features=n_in, eps=1e-6, affine=True)
+            self.norm2 = nn.BatchNorm1d(num_features=n_in, eps=1e-6, affine=True)
+        else:
+            self.norm1 = nn.Identity()
+            self.norm2 = nn.Identity()
+
+        if activation == "relu":
+            self.activation1 = nn.ReLU()
+            self.activation2 = nn.ReLU()
+        elif activation == "silu":
+            self.activation1 = nonlinearity()
+            self.activation2 = nonlinearity()
+        elif activation == "gelu":
+            self.activation1 = nn.GELU()
+            self.activation2 = nn.GELU()
+
+        # left-pad so the kernel never sees future frames (causal convolution)
+        self.left_padding = (3 - 1) * dilation
+
+        self.conv1 = nn.Conv1d(n_in, n_state, kernel_size=3, stride=1, padding=0, dilation=dilation)
+        self.conv2 = nn.Conv1d(n_state, n_in, kernel_size=1, stride=1, padding=0)
+
+    def forward(self, x):
+        x_orig = x
+        if self.norm == "LN":
+            x = self.norm1(x.transpose(-2, -1)).transpose(-2, -1)
+            x = self.activation1(x)
+        else:
+            x = self.norm1(x)
+            x = self.activation1(x)
+
+        x = nn.functional.pad(x, (self.left_padding, 0))
+
+        x = self.conv1(x)
+
+        if self.norm == "LN":
+            x = self.norm2(x.transpose(-2, -1)).transpose(-2, -1)
+            x = self.activation2(x)
+        else:
+            x = self.norm2(x)
+            x = self.activation2(x)
+
+        x = self.conv2(x)
+        x = x + x_orig
+        return x
+
+class CausalResnet1D(nn.Module):
+    def __init__(self, n_in, n_depth, dilation_growth_rate=1, reverse_dilation=True, activation='relu', norm=None):
+        super().__init__()
+
+        blocks = [
+            CausalResConv1DBlock(
+                n_in,
+                n_in,
+                dilation=dilation_growth_rate ** depth,
+                activation=activation,
+                norm=norm
+            ) for depth in range(n_depth)
+        ]
+        if reverse_dilation:
+            blocks = blocks[::-1]
+
+        self.model = nn.Sequential(*blocks)
+
+    def forward(self, x):
+        return self.model(x)
\ No newline at end of file
diff --git a/models/tae.py b/models/tae.py
new file mode 100644
index 0000000000000000000000000000000000000000..f610c6895c4857da74a566e355736ac86338ff5a
--- /dev/null
+++ b/models/tae.py
@@ -0,0 +1,94 @@
+import torch.nn as nn
+from models.causal_cnn import CausalEncoder, CausalDecoder
+
+
+# Causal TAE:
+class Causal_TAE(nn.Module):
+    def __init__(self,
+                 hidden_size=1024,
+                 down_t=2,
+                 stride_t=2,
+                 width=1024,
+                 depth=3,
+                 dilation_growth_rate=3,
+                 activation='relu',
+                 norm=None,
+                 latent_dim=16,
+                 clip_range=[]
+                 ):
+
+        super().__init__()
+
+        self.decode_proj = nn.Linear(latent_dim, width)
+
+        self.encoder = CausalEncoder(272, hidden_size, down_t, stride_t, width, depth, dilation_growth_rate, activation=activation, norm=norm, latent_dim=latent_dim, clip_range=clip_range)
+        self.decoder = CausalDecoder(272, hidden_size, down_t, stride_t, width, depth, dilation_growth_rate, activation=activation, norm=norm)
+
+    def preprocess(self, x):
+        # (B, T, C) -> (B, C, T) for Conv1d
+        x = x.permute(0, 2, 1).float()
+        return x
+
+    def postprocess(self, x):
+        # (B, C, T) -> (B, T, C)
+        x = x.permute(0, 2, 1)
+        return x
+
+    def encode(self, x):
+        x_in = self.preprocess(x)
+        x_encoder, mu, logvar = self.encoder(x_in)
+        x_encoder = self.postprocess(x_encoder)
+        x_encoder = x_encoder.contiguous().view(-1, x_encoder.shape[-1])
+
+        return x_encoder, mu, logvar
+
+    def forward(self, x):
+        x_in = self.preprocess(x)
+        # Encode
+        x_encoder, mu, logvar = self.encoder(x_in)
+        x_encoder = self.decode_proj(x_encoder)
+        # decoder
+        x_decoder = self.decoder(x_encoder)
+        x_out = self.postprocess(x_decoder)
+        return x_out, mu, logvar
+
+    def forward_decoder(self, x):
+        # decoder
+        x_width = self.decode_proj(x)
+        x_decoder = self.decoder(x_width)
+        x_out = self.postprocess(x_decoder)
+        return x_out
+
+
+class Causal_HumanTAE(nn.Module):
+    def __init__(self,
+                 hidden_size=1024,
+                 down_t=2,
+                 stride_t=2,
+                 depth=3,
+                 dilation_growth_rate=3,
+                 activation='relu',
+                 norm=None,
+                 latent_dim=16,
+                 clip_range=[]
+                 ):
+
+        super().__init__()
+        self.tae = Causal_TAE(hidden_size, down_t, stride_t, hidden_size, depth, dilation_growth_rate, activation=activation, norm=norm, latent_dim=latent_dim, clip_range=clip_range)
+
+    def encode(self, x):
+        h, mu, logvar = self.tae.encode(x)
+        return h, mu, logvar
+
+    def forward(self, x):
+        x_out, mu, logvar = self.tae(x)
+        return x_out, mu, logvar
+
+    def forward_decoder(self, x):
+        x_out = self.tae.forward_decoder(x)
+        return x_out
\ No newline at end of file
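A minimal round-trip sketch for the autoencoder above (hypothetical usage; CausalEncoder/CausalDecoder come from models/causal_cnn.py, which is not shown in this diff, so the exact latent shapes are indicative only):

    import torch
    from models.tae import Causal_TAE

    tae = Causal_TAE()                       # defaults match options/option_tae.py
    motion = torch.randn(4, 64, 272)         # (batch, frames, 272-dim motion representation)
    recon, mu, logvar = tae(motion)          # reconstruction plus Gaussian posterior stats
    latent, mu, logvar = tae.encode(motion)  # continuous latents, one per downsampled frame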
diff --git a/options/option_tae.py b/options/option_tae.py
new file mode 100644
index 0000000000000000000000000000000000000000..b04f830a7e66e158ae5c1822c37c39eabdbd5d72
--- /dev/null
+++ b/options/option_tae.py
@@ -0,0 +1,51 @@
+import argparse
+
+def get_args_parser():
+    parser = argparse.ArgumentParser(description='Optimal Transport AutoEncoder training for AIST',
+                                     add_help=True,
+                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+
+    ## dataloader
+    parser.add_argument('--dataname', type=str, default='t2m_272', help='dataset name')
+    parser.add_argument('--batch-size', default=128, type=int, help='batch size')
+    parser.add_argument('--window-size', type=int, default=64, help='training motion length')
+
+    ## optimization
+    parser.add_argument('--total-iter', default=2000000, type=int, help='number of total iterations to run')
+    parser.add_argument('--warm-up-iter', default=1000, type=int, help='number of total iterations for warmup')
+    parser.add_argument('--lr', default=5e-5, type=float, help='max learning rate')
+    parser.add_argument('--lr-scheduler', default=[50000, 400000], nargs="+", type=int, help="learning rate schedule (iterations)")
+    parser.add_argument('--gamma', default=0.05, type=float, help="learning rate decay")
+
+    parser.add_argument('--weight-decay', default=0.0, type=float, help='weight decay')
+
+    # causal TAE architecture
+    parser.add_argument("--down-t", type=int, default=2, help="downsampling rate")
+    parser.add_argument("--stride-t", type=int, default=2, help="stride size")
+    parser.add_argument("--depth", type=int, default=3, help="depth of the network")
+    parser.add_argument("--dilation-growth-rate", type=int, default=3, help="dilation growth rate")
+
+    ## resume
+    parser.add_argument("--resume-pth", type=str, default=None, help='resume pth for causal TAE')
+
+    ## output directory
+    parser.add_argument('--out-dir', type=str, default='output/', help='output directory')
+    parser.add_argument('--results-dir', type=str, default='visual_results/', help='directory for visualization results')
+    parser.add_argument('--visual-name', type=str, default='vis', help='name prefix for visualization files')
+    parser.add_argument('--exp-name', type=str, default='exp', help='name of the experiment, will create a file inside out-dir')
+    parser.add_argument('--latent_dir', type=str, default='t2m_latents/', help='latent directory')
+    ## other
+    parser.add_argument('--print-iter', default=200, type=int, help='print frequency')
+    parser.add_argument('--eval-iter', default=20000, type=int, help='evaluation frequency')
+    parser.add_argument('--seed', default=123, type=int, help='seed for initializing training')
+
+    parser.add_argument('--vis-gt', action='store_true', help='whether visualize GT motions')
+    parser.add_argument('--nb-vis', default=20, type=int, help='number of visualizations')
+    parser.add_argument('--root_loss', default=7.0, type=float, help='weight of the root loss')
+    parser.add_argument('--latent_dim', default=16, type=int, help='latent dimension')
+    parser.add_argument('--hidden_size', default=1024, type=int, help='hidden size')
+    parser.add_argument('--nb_joints', default=22, type=int, help='number of joints')
+    parser.add_argument('--num_gpus', default=1, type=int, help='number of GPUs')
+
+    return parser.parse_args()
\ No newline at end of file
diff --git a/options/option_transformer.py b/options/option_transformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..d050eb944172eeedf02839ab3386ad3c8dddbdd2
--- /dev/null
+++ b/options/option_transformer.py
@@ -0,0 +1,39 @@
+import argparse
+
+def get_args_parser():
+    parser = argparse.ArgumentParser(description='options',
+                                     add_help=True,
+                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+
+    parser.add_argument('--dataname', type=str, default='t2m_272', help='dataset name')
+    parser.add_argument('--seed', default=123, type=int, help='seed for initializing training')
+    parser.add_argument('--batch_size', default=256, type=int, help='batch size for training')
+    parser.add_argument('--latent_dir', type=str, default='latent/', help='latent directory')
+    parser.add_argument("--resume-pth", type=str, default=None, help='resume pth for causal TAE')
+    parser.add_argument("--resume-trans", type=str, default=None, help='resume gpt pth')
+    parser.add_argument('--out-dir', type=str, default='output_GPT_Final/', help='output directory')
+    parser.add_argument('--exp-name', type=str, default='exp', help='name of the experiment, will create a file inside out-dir')
+    parser.add_argument('--hidden_size', default=1024, type=int, help='hidden size')
+    parser.add_argument("--down-t", type=int, default=2, help="downsampling rate")
+    parser.add_argument("--stride-t", type=int, default=2, help="stride size")
+    parser.add_argument("--depth", type=int, default=3, help="depth of the network")
+    parser.add_argument("--dilation-growth-rate", type=int, default=3, help="dilation growth rate")
+
+    parser.add_argument('--num_diffusion_head_layers', type=int, default=9, help='number of diffusion head layers')
+    parser.add_argument('--latent_dim', type=int, default=16, help='latent dimension')
+    parser.add_argument('--total_iter', type=int, default=100000, help='number of total iterations to run')
+    parser.add_argument('--lr', default=1e-4, type=float, help='max learning rate')
+    parser.add_argument('--gamma', default=0.05, type=float, help="learning rate decay")
+
+    parser.add_argument('--decay-option', default='all', type=str, choices=['all'], help='weight decay option')
+    parser.add_argument('--weight-decay', default=1e-6, type=float, help='weight decay')
+    parser.add_argument('--optimizer', default='adamw', type=str, choices=['adam', 'adamw'], help='optimizer')
+
+    parser.add_argument('--num_gpus', default=1, type=int, help='number of GPUs')
+
+    return parser.parse_args()
\ No newline at end of file
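Note that both option modules call parse_args() inside get_args_parser(), so they read sys.argv directly; a small sketch of programmatic use (the script name and checkpoint path here are hypothetical):

    import sys
    from options.option_transformer import get_args_parser

    sys.argv = ['train_transformer.py', '--resume-pth', 'checkpoints/net_last.pth', '--latent_dim', '16']
    args = get_args_parser()
    print(args.resume_pth, args.latent_dim)  # checkpoints/net_last.pth 16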
diff --git a/outputs/final_motion.bvh b/outputs/final_motion.bvh
new file mode 100644
index 0000000000000000000000000000000000000000..e8c343cc0c6119dc62defc04a6251918a23f2650
--- /dev/null
+++ b/outputs/final_motion.bvh
@@ -0,0 +1,456 @@
+HIERARCHY
+ROOT Pelvis
+{
+    OFFSET -0.001795 -0.223333 0.028219
+    CHANNELS 6 Xposition Yposition Zposition Zrotation Yrotation Xrotation
+    JOINT Left_hip
+    {
+        OFFSET 0.069520 -0.091406 -0.006815
+        CHANNELS 3 Zrotation Yrotation Xrotation
+        JOINT Left_knee
+        {
+            OFFSET 0.034277 -0.375199 -0.004496
+            CHANNELS 3 Zrotation Yrotation Xrotation
+            JOINT Left_ankle
+            {
+                OFFSET -0.013596 -0.397961 -0.043693
+                CHANNELS 3 Zrotation Yrotation Xrotation
+                JOINT Left_foot
+                {
+                    OFFSET 0.026358 -0.055791 0.119288
+                    CHANNELS 3 Zrotation Yrotation Xrotation
+                    End Site
+                    {
+                        OFFSET 0.000000 0.000000 0.000000
+                    }
+                }
+            }
+        }
+    }
+    JOINT Right_hip
+    {
+        OFFSET -0.067670 -0.090522 -0.004320
+        CHANNELS 3 Zrotation Yrotation Xrotation
+        JOINT Right_knee
+        {
+            OFFSET -0.038290 -0.382569 -0.008850
+            CHANNELS 3 Zrotation Yrotation Xrotation
+            JOINT Right_ankle
+            {
+                OFFSET 0.015774 -0.398415 -0.042312
+                CHANNELS 3 Zrotation Yrotation Xrotation
+                JOINT Right_foot
+                {
+                    OFFSET -0.025372 -0.048144 0.123348
+                    CHANNELS 3 Zrotation Yrotation Xrotation
+                    End Site
+                    {
+                        OFFSET 0.000000 0.000000 0.000000
+                    }
+                }
+            }
+        }
+    }
+    JOINT Spine1
+    {
+        OFFSET -0.002533 0.108963 -0.026696
+        CHANNELS 3 Zrotation Yrotation Xrotation
+        JOINT Spine2
+        {
+            OFFSET 0.005487 0.135180 0.001092
+            CHANNELS 3 Zrotation Yrotation Xrotation
+            JOINT Spine3
+            {
+                OFFSET 0.001457 0.052922 0.025425
+                CHANNELS 3 Zrotation Yrotation Xrotation
+                JOINT Neck
+                {
+                    OFFSET -0.002778 0.213870 -0.042857
+                    CHANNELS 3 Zrotation Yrotation Xrotation
+                    JOINT Head
+                    {
+                        OFFSET 0.005152 0.064970 0.051349
+                        CHANNELS 3 Zrotation Yrotation Xrotation
+                        End Site
+                        {
+                            OFFSET 0.000000 0.000000 0.000000
+                        }
+                    }
+                }
+                JOINT Left_collar
+                {
+                    OFFSET 0.078845 0.121749 -0.034090
+                    CHANNELS 3 Zrotation Yrotation Xrotation
+                    JOINT Left_shoulder
+                    {
+                        OFFSET 0.090977 0.030469 -0.008868
+                        CHANNELS 3 Zrotation Yrotation Xrotation
+                        JOINT Left_elbow
+                        {
+                            OFFSET 0.259612 -0.012772 -0.027456
+                            CHANNELS 3 Zrotation Yrotation Xrotation
+                            JOINT Left_wrist
+                            {
+                                OFFSET 0.249234 0.008986 -0.001171
+                                CHANNELS 3 Zrotation Yrotation Xrotation
+                                JOINT Left_palm
+                                {
+                                    OFFSET 0.084042 -0.008162 -0.014945
+                                    CHANNELS 3 Zrotation Yrotation Xrotation
+                                    End Site
+                                    {
+                                        OFFSET 0.000000 0.000000 0.000000
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+                JOINT Right_collar
+                {
+                    OFFSET -0.081759 0.118833 -0.038615
+                    CHANNELS 3 Zrotation Yrotation Xrotation
+                    JOINT Right_shoulder
+                    {
+                        OFFSET -0.096012 0.032551 -0.009143
+                        CHANNELS 3 Zrotation Yrotation Xrotation
+                        JOINT Right_elbow
+                        {
+                            OFFSET -0.253742 -0.013329 -0.021401
+                            CHANNELS 3 Zrotation Yrotation Xrotation
+                            JOINT Right_wrist
+                            {
+                                OFFSET -0.255298 0.007772 -0.005559
+                                CHANNELS 3 Zrotation Yrotation Xrotation
+                                JOINT Right_palm
+                                {
+                                    OFFSET -0.084622 -0.006117 -0.010315
+                                    CHANNELS 3 Zrotation Yrotation Xrotation
+                                    End Site
+                                    {
+                                        OFFSET 0.000000 0.000000 0.000000
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
+MOTION
+Frames: 312
+Frame Time: 0.016667
+[312 frame lines omitted: each frame is one line of 75 whitespace-separated channel values, i.e. 6 root translation/rotation channels plus 23 joints x 3 Euler angles]
-463.663488 -6990.936782 22.673870 -40.683050 35.826371 99.435234 7.470764 49.453773 -92.945356 3.506033 68.872934 102.748118 -4.841584 82.037265 20.984115 -4.049336 -6.565151 -98.222124 -0.557448 52.852290 86.810474 -33.704762 80.961187 -112.746381 -41.900453 90.880153 -19.190025 7.425298 -36.210989 -94.489154 -20.652785 22.102191 -81.070147 -59.178506 -16.788274 -76.898881 -77.120131 89.634172 94.528742 -5.762616 -45.848584 104.130549 -4.460963 24.414014 -88.774550 -4.621333 -77.631015 54.122893 7.260912 40.185863 -124.223575 -68.314633 69.191937 132.795503 -21.937618 2.594888 0.000000 0.000000 0.000000 100.158286 30.681978 -91.242720 -45.025594 41.002350 -13.798360 126.821156 43.929862 70.245353 -167.543063 4.779353 -28.847572 0.000000 0.000000 0.000000 +-5107.644959 -328.889654 -8061.145019 -165.038666 -23.007664 -175.695423 97.983625 4.137722 56.947503 -93.324454 12.885704 36.845795 101.440049 36.483037 83.280447 17.965198 2.556334 -2.593692 -96.661966 -1.784889 60.760330 75.650207 -32.701407 79.570079 151.058794 53.729934 -28.333267 -12.873133 14.364933 -36.740858 -92.739102 -6.516040 9.044557 -78.164414 -57.830141 -4.715020 80.972138 -73.434709 -59.467183 94.671791 14.655610 -33.850776 107.594577 -10.217855 28.896016 -84.076990 9.671747 -54.412376 77.692695 -23.230920 -48.811063 -130.570877 -48.051298 75.627685 128.584755 -21.242371 1.203190 0.000000 0.000000 0.000000 119.686911 -9.296148 -64.140872 -83.915911 5.947565 -52.630524 134.679484 45.810820 81.343322 -162.175289 12.667484 -21.830150 0.000000 0.000000 0.000000 +-5542.325147 -134.501822 -8016.918615 92.734965 -69.493169 155.028281 98.094629 -2.020238 68.929719 -94.354187 23.493850 36.538013 108.135663 -55.004931 67.535667 14.290680 5.458669 -5.683502 -96.233809 38.302551 83.526992 71.040435 -21.531500 72.409684 147.812998 34.789880 -11.751120 -16.436767 13.764877 -47.999571 -92.940215 5.652523 5.591770 -81.286841 -47.544625 33.210121 -87.214464 -58.045483 91.308627 92.755411 64.266325 24.096198 110.345988 19.605797 40.819810 -84.763461 -9.002945 -34.333492 82.233424 -21.245179 -54.006407 -142.076465 -43.465361 83.997711 127.075475 -21.728076 1.875083 0.000000 0.000000 0.000000 116.632307 -3.323995 -44.137955 -85.053678 10.714166 -46.168572 139.974241 45.165467 95.054653 -156.058500 18.910652 -14.921503 0.000000 0.000000 0.000000 +-5919.534238 -213.467932 -7365.636940 -0.800794 50.563430 -105.291703 102.794747 -27.503858 69.907358 -96.403993 15.097107 67.366189 127.149458 72.621906 112.256940 17.838863 5.416790 15.222997 -98.031599 64.988777 94.834624 80.142684 -3.994852 68.150852 156.710289 45.676335 -41.850510 -23.786310 26.537584 -51.847209 -95.086263 -36.651243 6.369343 -84.980960 -48.320262 64.162721 -84.965253 46.091454 72.920829 163.587646 -79.745312 74.404664 142.739547 32.529707 93.593102 -87.548343 -21.265326 -66.314884 100.738972 -20.864508 -68.164248 -145.472425 -49.280963 82.769821 129.199408 -19.232533 2.391278 0.000000 0.000000 0.000000 101.455464 1.008684 -62.177639 -97.872224 19.653331 -54.862029 133.729157 41.708759 94.900399 -162.025406 16.990245 -17.132303 0.000000 0.000000 0.000000 +-4278.667812 -132.114645 -8161.049494 -42.968429 -21.411733 -49.401194 79.956410 -78.979055 107.604853 -92.626539 18.659945 71.809261 -80.039079 32.069207 -106.507263 19.977600 -7.412315 9.181442 -98.647892 84.301671 98.320633 78.516204 -31.468548 74.225231 -177.272575 41.208334 -8.289995 -19.117775 27.999244 -48.038956 102.204456 -60.441377 138.393032 -87.399578 -48.617150 26.058008 -84.135929 58.100819 4.946784 -125.089938 
-76.746233 -1.685330 150.743962 13.264935 123.643050 -88.843279 -22.686899 -67.569557 116.455642 -13.204892 -66.388353 -145.549637 -75.686975 49.631217 129.501727 -21.113931 0.785536 0.000000 0.000000 0.000000 95.180275 -21.959392 -75.184860 -97.001670 18.677416 -50.694008 146.076169 20.133477 105.001823 -165.077628 15.104244 -21.039566 0.000000 0.000000 0.000000 +-3732.373820 -98.050200 -5988.494457 2.501208 44.489843 -63.327145 96.586126 35.115697 56.208520 -89.485533 13.601632 77.377068 -76.576363 -32.363894 -71.575745 13.782583 -3.316010 13.894559 -110.538993 59.448988 61.348709 85.256226 -20.585864 89.699572 -130.761787 -48.982941 73.122645 -48.763575 35.705895 -111.397572 82.642144 14.266550 48.618202 -109.119469 -42.716472 56.089428 -91.658848 54.107847 -6.133894 -137.411733 -56.049543 38.023116 -145.365745 -25.923993 63.205074 -81.975125 -25.734745 -91.780713 158.381511 -30.733968 -68.408136 -6.395923 -69.928166 -86.442295 130.342072 -18.692889 1.043240 0.000000 0.000000 0.000000 -57.606423 -55.950786 28.849064 -84.534688 17.306995 -45.949905 -160.551423 -58.471134 4.711452 -168.071373 10.805205 -26.588005 0.000000 0.000000 0.000000 +-1263.570614 -203.763235 -7899.535302 -135.300080 -37.342588 23.157741 75.521599 -84.830583 113.680498 -90.919937 15.560056 67.452954 114.596074 59.973965 91.692770 15.823387 -21.373159 16.591980 -171.311858 66.462418 16.617248 86.815171 -36.328638 96.984347 -156.002336 -34.978274 81.769935 -49.594793 35.502995 -96.441826 87.559419 24.796127 32.066855 -122.423069 -43.958241 -35.610640 -93.055702 15.803553 -44.192586 -150.446019 -51.729292 34.567864 -143.263134 -38.183987 70.700843 -81.613909 -13.728714 -100.675710 -137.882410 -0.688638 -84.771065 -22.474799 -66.230420 -81.271949 130.694196 -20.057669 0.095885 0.000000 0.000000 0.000000 -65.436825 -12.386434 40.814666 -98.902696 9.452419 -49.925265 -143.472720 -30.916443 -18.607147 -169.647898 7.956627 -29.454453 0.000000 0.000000 0.000000 +-3776.423469 -152.487539 -6759.895846 164.842788 -18.957034 46.160045 117.779297 58.724335 -38.332580 -86.730143 12.556221 78.856830 -91.544359 -37.570692 -60.660917 14.473973 -21.234213 10.233326 -110.748913 49.169009 46.862412 86.859431 -20.794257 107.683707 -130.973502 6.519757 62.698159 -82.416294 44.733707 -120.651169 87.805259 22.830124 48.454468 -108.806945 -4.734635 -97.243689 -96.418323 17.075690 -67.507178 -170.584455 4.725151 81.874137 -174.920272 -67.665539 86.001537 -79.549792 -0.809447 -100.357283 96.306603 29.719533 60.471050 -46.336070 -55.464018 -43.467566 128.911800 -15.582548 1.882874 0.000000 0.000000 0.000000 -64.828148 24.259166 46.544097 -46.694767 62.188635 -3.780907 -120.775805 -28.766071 -21.555427 -167.423269 7.914228 -26.897587 0.000000 0.000000 0.000000 +-3377.554696 -216.043631 -8238.408236 143.141699 50.853248 113.511343 102.284745 -41.416054 8.181037 -83.100483 22.382847 76.903229 -92.013192 -24.238899 -79.272155 9.408333 -13.947896 4.820379 -105.674637 36.778744 37.424422 82.669979 -33.237172 111.697036 -169.157809 54.395448 2.707173 -21.061448 48.475485 -53.072891 91.724973 5.170058 85.429996 -101.061498 -5.832554 -97.956714 -98.013088 -42.536892 -79.233033 158.116964 63.263327 10.517775 155.250810 -73.167005 114.685981 -73.416650 24.634596 -95.568599 72.396133 -29.800572 -13.224808 -58.913966 -52.029162 -22.599590 130.253534 -15.825225 0.915045 0.000000 0.000000 0.000000 -63.332269 32.260224 27.632900 -101.084635 55.565576 -54.316354 -117.165204 -29.049627 -5.258896 -167.828189 8.992867 -23.366843 0.000000 0.000000 0.000000 
+-2228.203483 -280.384619 -9246.250929 -127.238807 11.792560 -91.743849 102.246329 -25.878677 19.523451 -78.087719 29.324065 82.939197 -102.825581 -43.186525 -61.442183 1.308397 -10.672468 0.538288 -105.480954 32.373610 22.247726 87.563992 -4.560505 109.120276 110.506959 17.492756 -74.126258 5.416096 22.058357 -22.331372 134.287415 -69.789685 76.162667 117.954350 2.735599 -49.647661 -123.825198 79.547064 63.270837 148.299855 10.151661 8.926343 171.463089 -70.848465 91.529846 -11.610218 67.265012 -30.663913 56.680044 3.818354 -3.077202 -69.675771 -57.309412 -7.317816 131.007691 -17.309181 -0.467866 0.000000 0.000000 0.000000 -70.204821 -60.468997 49.951737 -79.327279 53.772903 81.799810 -159.382452 -5.797798 71.934082 -168.727045 3.656338 -29.393059 0.000000 0.000000 0.000000 +-2480.830012 -463.124395 -7393.839344 162.641561 34.786887 158.902311 95.564595 47.559650 -4.072460 -34.938905 75.335805 129.216524 -101.860506 -55.434197 -35.384130 25.715653 -19.245656 -8.947324 -104.134092 16.877190 24.275991 80.483871 -61.612184 111.098191 114.932276 40.669891 -66.000005 3.455727 2.115659 -23.641939 -95.647590 -24.954142 -23.772122 117.211774 54.060026 -4.695111 92.423213 -7.235310 -68.278176 110.424710 36.588755 45.822939 144.411083 -71.807727 121.247102 61.034623 44.412765 31.412344 56.487702 5.709219 -23.406697 -168.500957 -80.145144 98.428571 130.834597 -21.108660 -0.136396 0.000000 0.000000 0.000000 -91.514623 74.694823 65.411766 -86.709890 19.949679 74.359044 -162.894937 4.244210 77.473056 -174.256105 -6.270158 -32.612369 0.000000 0.000000 0.000000 +-2903.820194 -170.648428 -6846.965157 159.108938 20.860686 61.460252 64.382428 73.091518 -45.766367 -22.068994 70.443624 148.198602 -59.512205 -53.703934 -27.281825 28.965169 -5.801556 -30.701204 -113.514881 31.069484 28.259624 51.253332 7.684318 121.160920 -105.154721 15.639113 73.933705 13.069209 -19.311931 -29.258216 -93.205910 -7.072338 -29.274777 159.730030 71.391396 -14.072075 -93.246809 31.336179 91.138002 -72.093816 -52.701545 -99.738197 101.345333 -75.924571 179.776567 86.606395 25.545094 14.534989 41.410599 9.973838 -20.488484 138.742789 -47.519733 179.312753 123.982420 -21.734091 -3.443831 0.000000 0.000000 0.000000 -94.087872 -1.420403 51.243125 -78.054396 22.180141 79.405205 -172.697140 -26.445008 -166.629347 -168.441053 -16.338071 -32.998215 0.000000 0.000000 0.000000 +-3230.730544 112.905641 -7507.190197 -153.774178 -42.545084 -49.464111 98.261036 77.556871 -24.377216 -36.882210 64.499461 150.006719 -68.252965 -6.204380 11.298902 22.352391 17.390382 -74.804025 98.735425 -55.868614 -62.680222 47.370847 36.776320 103.790227 -91.241329 -50.557569 56.668572 -35.430071 -18.055971 -50.377746 -87.176539 41.666969 -35.980777 163.331102 43.681302 80.859610 -97.741128 -12.348468 108.065767 -59.225116 -84.050507 -108.816485 84.101958 -73.327711 -164.782095 79.759636 23.869553 2.272473 39.219407 9.725610 -21.602018 122.138720 -30.088312 -143.272717 118.511286 -24.396875 -1.693655 0.000000 0.000000 0.000000 -81.956288 -17.990179 6.475005 -50.126916 -3.303111 73.464342 158.836691 -1.301467 -88.399192 -154.740535 -19.985613 -32.004676 0.000000 0.000000 0.000000 +-2423.445397 370.402491 -9249.388731 -11.512707 50.140015 -25.208247 6.084522 77.713341 -140.289426 94.157231 56.230593 -60.482487 -49.767255 43.288732 72.389476 -37.273226 55.730718 144.196106 83.239700 17.743114 -27.497405 49.291901 -14.381495 103.465727 -73.912931 -56.552464 37.403233 52.260646 -17.295163 -81.131789 -88.190634 49.460035 -31.973453 108.711839 50.757030 41.279086 
107.914772 -60.046860 32.238501 89.455939 -14.520158 88.951899 97.463654 -64.434936 -148.821384 85.305509 26.690819 5.167671 38.749277 16.975580 -11.237196 128.678497 -20.726040 -133.032826 118.571460 -51.675534 -26.052284 0.000000 0.000000 0.000000 -75.130518 -22.294046 -6.074508 31.679002 -62.322528 -34.913014 162.526643 18.579509 -65.912119 92.682556 42.718871 74.854805 0.000000 0.000000 0.000000 +-1769.281471 570.098261 -8588.831920 20.380662 6.171960 178.576787 -31.266657 86.620430 175.931034 97.571687 78.498017 -73.151028 -36.577087 -6.234057 90.605934 -160.210536 -8.936891 -4.111663 68.587848 64.621726 -17.851987 -33.380360 84.271462 90.422890 -68.281540 -9.352891 40.433680 47.238792 64.452347 -158.065978 -50.930818 80.307134 -13.205555 109.499015 48.929810 40.555342 109.990431 -44.339857 12.367674 94.985891 48.219547 44.371117 101.774556 -65.890458 -151.105903 75.931016 20.059513 4.435841 39.929948 22.589772 -1.205397 100.918660 -14.380443 -120.244071 -98.449940 13.260312 -17.043199 0.000000 0.000000 0.000000 -72.317677 -19.718269 -11.042859 26.330538 -47.031212 -27.492254 -166.324059 28.175831 -60.046380 89.020048 44.961504 10.557157 0.000000 0.000000 0.000000 +91.912062 339.496271 -8476.023910 -33.236792 35.313857 13.962311 -123.310714 57.250511 60.117958 83.696790 -63.950017 124.409681 -48.111427 2.697288 100.426077 -163.963949 -49.632225 -18.999941 114.241880 25.187726 71.104275 98.853360 -75.121159 -36.237015 -74.729014 -7.021229 -25.014320 140.797415 49.686794 -63.165893 -61.392766 82.848825 -11.225212 137.229654 64.488639 31.609681 116.229421 -54.717196 41.539562 95.405008 44.340897 53.443266 100.190021 -77.840406 -169.602107 74.600256 7.450465 12.322625 48.317414 26.057220 8.131349 122.160270 -25.427008 -166.518259 -95.701342 -7.187172 -11.189462 0.000000 0.000000 0.000000 -78.398950 -26.069909 -20.305253 -22.427156 -55.263262 15.630720 -118.902951 68.923204 -45.141040 87.333806 19.309985 -4.885553 0.000000 0.000000 0.000000 +865.490738 135.988443 -9964.232058 -150.263123 19.129699 136.973195 -100.371164 64.874218 70.076928 83.288089 -29.513079 139.878634 -47.092429 61.368716 95.128373 -127.185531 -58.210550 -51.913910 44.635970 -76.020783 -156.718270 91.494326 -75.885193 -35.326308 -82.263725 -25.500104 -43.879178 177.752010 31.563898 -36.090725 87.489305 78.483732 126.556809 160.768674 55.818265 54.785915 -101.471129 -13.265588 -81.303654 97.927358 30.583119 68.161844 94.988270 -81.388822 -170.626474 68.462032 -0.752010 17.767660 51.072898 28.560544 12.397731 89.018663 -25.398457 164.105231 -93.742004 -9.786742 -10.049655 0.000000 0.000000 0.000000 -80.415659 -24.829163 -29.008567 -18.250508 -61.277390 20.584393 -87.751265 75.298763 -1.176001 93.885439 7.004742 -3.394978 0.000000 0.000000 0.000000 +-1185.604361 -262.101080 -11070.867608 123.230312 -27.309413 -27.056445 -122.595204 62.139402 47.496593 55.289969 -7.870740 159.590557 -71.664828 53.085467 62.789152 -145.586311 22.540504 -28.526347 113.718857 -77.687109 -172.680854 95.092962 -79.886827 -35.523022 -89.109265 -63.408497 -29.036982 -176.977719 32.660723 -62.691315 -164.579668 80.458924 -105.575216 -168.798246 49.049153 60.659382 -100.586679 -24.195073 -67.122358 99.873039 19.012350 81.500100 87.551252 -80.647699 -162.149530 67.156877 1.747287 26.764908 61.095639 21.913480 16.567183 104.880949 -33.737746 176.522202 -115.029357 -13.572210 -15.902400 0.000000 0.000000 0.000000 -84.111285 -26.531135 -37.669367 -31.244868 -44.969004 35.350056 -79.019965 75.993448 19.263991 104.921068 -5.019298 -1.940231 0.000000 
0.000000 0.000000 +-3013.923202 -331.579992 -9591.301630 -160.500439 15.737476 139.866120 -110.311702 69.269517 57.319797 56.679764 28.285360 163.630814 -75.987832 55.450797 55.689519 -133.200452 51.177697 -7.976577 126.741271 -75.041011 145.866096 74.044379 -83.694220 -17.060013 -101.481708 -68.038001 -5.706805 -162.905517 -5.839283 -17.990654 133.935127 72.836471 -173.370461 -113.918820 20.989751 73.335889 -94.574916 -27.509217 -52.550341 100.351207 25.992988 93.149798 92.955791 -78.932601 -162.204403 71.016023 7.028499 18.918388 64.365592 20.367204 14.475184 70.115102 -34.086030 -161.230523 -116.337716 -14.081749 -15.120654 0.000000 0.000000 0.000000 -83.328172 -23.234112 -45.093483 -12.541810 -41.955883 18.892015 -67.723797 51.975211 36.756525 105.765019 -5.859071 -2.151542 0.000000 0.000000 0.000000 +-1522.285622 -192.298925 -7850.833861 -96.428870 3.081808 48.629938 -111.639201 63.185079 51.842058 25.899903 6.600515 150.769824 -70.651172 43.939335 49.526548 -44.280947 -31.176006 145.861855 -64.878858 -72.441274 -101.675902 11.944892 -83.753876 40.858193 -84.092587 -46.511532 -32.793987 96.315445 -9.786356 98.900490 78.808110 45.326553 121.271748 -97.506850 -20.953166 51.546065 -89.348505 -38.124968 -12.129933 114.485468 38.241467 76.696804 92.318151 -78.783507 -159.733866 77.245961 -1.678011 31.055976 63.384266 24.228370 28.239657 33.718792 -44.307269 -123.847543 -126.566071 -14.906813 -15.366952 0.000000 0.000000 0.000000 -78.234769 -24.300822 -38.521627 -11.748653 78.961373 -124.687283 -120.341488 10.995290 -16.663359 105.901376 -2.545344 -6.034949 0.000000 0.000000 0.000000 +-1584.203898 339.455469 -8624.234609 -46.396792 14.068927 -51.542396 -102.787354 82.754677 72.152325 90.080632 -63.663437 89.365201 -62.543584 56.160782 55.870856 -47.120127 -8.067273 142.860438 71.171389 62.414903 38.264322 78.312918 -69.536532 -30.837308 -85.549440 9.738249 -40.303518 55.208398 3.487500 92.517200 61.417766 46.286219 119.356870 -97.683836 -22.996172 43.950335 -93.252365 -40.093731 -15.686566 108.146128 42.534427 71.734242 83.193797 -75.776678 -152.965918 73.348504 8.262646 33.457454 51.389552 34.965640 28.761286 77.502979 42.131834 -117.655733 -124.500918 -12.999590 -12.997588 0.000000 0.000000 0.000000 -74.816098 -25.104845 -23.031409 -92.618914 25.672487 133.678326 -103.995283 14.088997 -1.055065 113.179140 -6.095902 -4.644243 0.000000 0.000000 0.000000 +-2346.542572 911.766596 -7648.156836 7.325649 -78.692623 -125.106662 40.887468 -75.445927 -87.374232 89.829762 -20.959763 76.053940 -66.092819 54.666403 56.246587 -13.684405 -47.873414 98.176276 56.356013 55.425688 21.835844 88.640887 -19.952948 -61.397599 -88.537472 41.352607 -53.884100 94.517702 -1.159663 -132.965591 18.595490 55.640228 101.938537 -92.392072 6.004344 57.413095 -90.782872 -43.722313 4.555855 96.398799 38.918317 74.323331 76.191356 -79.096149 -159.914775 72.462129 6.961868 54.845683 -20.863526 68.869452 -15.348193 -156.715254 8.903430 -22.860123 -120.297994 -15.648555 -11.168951 0.000000 0.000000 0.000000 -74.309877 -23.146619 7.123054 142.824352 70.512116 11.098509 -125.562859 3.771306 -4.161668 111.265427 4.284322 -10.591641 0.000000 0.000000 0.000000 +-4209.866064 1376.635797 -8133.173475 113.998806 32.475847 118.111176 -69.819888 -58.980942 -23.675628 63.584988 60.728220 54.068278 -66.217219 55.945326 62.985587 49.862903 -15.844905 0.289457 78.767934 4.886255 31.745463 75.527572 -13.040523 -59.097005 -88.094666 56.351362 -65.174245 32.755137 10.871991 32.083598 -64.326261 -25.866606 -45.130500 -91.001400 12.786308 
56.920761 -90.594153 -28.414642 16.234624 92.000901 37.328504 63.916151 62.504341 -75.696937 -144.741866 70.306280 -1.806514 59.112931 10.691114 64.848265 18.200569 -131.438796 12.352745 -48.500024 -116.115491 -13.137515 -10.618926 0.000000 0.000000 0.000000 -77.930312 -24.056600 27.446872 128.023948 53.237812 -7.700492 -114.651711 -2.732558 8.287606 115.620073 2.880236 -11.841582 0.000000 0.000000 0.000000 +-4650.955278 1347.997126 -9057.549333 -67.728381 39.097527 -60.138398 -78.864551 -32.238319 -27.476560 64.793327 -64.892542 156.140703 -64.172300 61.226511 60.968717 42.875335 0.945543 -24.697490 80.064507 -40.413378 -6.993271 62.118450 11.455046 -58.325339 -92.354121 7.309062 -92.269407 41.573042 -29.212500 -14.183607 -62.946323 -48.826200 -12.473863 -88.651032 20.030125 58.692115 -92.629278 -11.074740 13.438735 83.540984 35.293067 51.954985 -22.338467 -74.474384 -99.987138 63.994403 -32.796666 72.983417 124.701272 -63.623933 98.321980 -179.809550 19.862114 -47.351649 -113.314210 -16.810358 -6.387961 0.000000 0.000000 0.000000 -85.134304 -16.799228 61.614005 126.584424 41.079622 -14.231829 -141.229033 -4.933311 -37.499792 107.518884 7.573557 -18.553265 0.000000 0.000000 0.000000 +-4506.608041 996.955988 -8786.886447 -79.515162 -58.359049 -44.495537 -79.671504 0.214916 -26.210712 -21.149271 -75.390259 -93.223214 -87.415381 63.564379 33.805620 -36.659387 -19.192132 38.417360 51.443982 -65.126989 38.136814 77.103753 47.310189 -22.232210 -102.129603 -50.424291 -88.592727 58.340588 32.978479 2.262246 7.109017 -27.144254 -76.964150 -89.471022 14.623176 62.155272 -92.618659 22.657958 24.680365 62.133018 49.673901 6.285804 -31.943721 -71.304164 -80.606289 75.146192 -3.583318 82.546874 -114.266106 -10.747357 -33.538785 162.984293 -16.225999 -86.664075 -104.739595 -5.841809 -5.882826 0.000000 0.000000 0.000000 -85.464323 -23.239302 61.625518 137.636750 32.539088 -0.838066 -137.269532 9.802519 -84.656180 113.261926 10.309315 -17.497669 0.000000 0.000000 0.000000 +-4415.904852 369.207666 -7882.404105 -68.704596 14.414620 -120.828614 -74.356962 41.812571 -29.901216 108.170974 -39.463006 166.220013 -127.794330 3.768438 -94.234096 -6.040229 -29.767365 25.680147 -70.807677 18.553867 -31.557880 90.526267 12.034580 112.129916 103.898575 -49.671651 25.323151 -38.422199 85.236951 -95.753160 76.856065 41.195197 -58.955013 17.754457 38.946755 65.618522 -86.056420 56.061897 21.934184 85.565403 -17.430315 -47.597367 -69.505077 -23.372753 -65.903486 -99.120286 39.985027 -74.135429 -122.183173 -24.282812 -52.497892 -47.217664 -77.010698 -163.056638 108.874018 -78.061956 -11.073605 0.000000 0.000000 0.000000 -91.637098 -19.678804 77.822012 -170.860112 43.362491 48.217492 -145.000713 1.894857 -50.921959 143.215184 -8.045214 5.957177 0.000000 0.000000 0.000000 +-3372.318203 -65.125364 -8212.560385 -132.402742 38.778660 111.374068 -59.172499 49.229230 -2.705844 -94.923207 14.326106 -24.426791 109.297310 -34.681017 30.159621 -11.427192 -41.151330 34.536430 -96.030448 29.635622 -0.466275 90.635738 -29.901771 102.372508 103.249343 -32.236584 33.064769 23.280802 70.347813 -18.563185 89.446154 65.372217 8.481473 -96.289729 -24.495018 -82.705389 -95.943645 42.050581 -65.764807 73.347681 -61.714107 30.909582 -32.545022 -15.161278 -70.275370 81.968266 30.494553 77.627383 93.874241 8.612401 100.889423 -14.515263 -72.689160 160.704877 118.077374 -85.305349 1.786399 0.000000 0.000000 0.000000 -82.673931 10.312398 69.032288 -109.652202 18.247672 112.218377 -150.793442 28.810592 -52.104722 165.820673 -27.873235 -26.628466 
0.000000 0.000000 0.000000 +-4306.491035 -299.923315 -9012.435384 124.923431 69.999215 -90.166012 34.547234 60.761717 69.016961 -107.168397 56.550587 25.854147 100.143523 17.236263 46.788341 14.350469 -16.905721 2.725432 -93.499019 -1.837052 -6.778963 93.218818 -20.862306 102.434539 88.922323 14.132223 52.151245 -40.350477 50.978561 -65.475752 96.110697 58.182607 14.051207 92.451363 -19.842137 76.662711 -94.845202 48.034566 -94.260786 66.320948 -42.080708 0.450868 47.456023 9.353491 103.872838 70.083160 42.720627 71.936506 77.624641 15.153781 90.325521 -8.061480 -71.755847 158.938195 100.076090 -87.510388 19.301899 0.000000 0.000000 0.000000 -74.544573 39.705710 85.645493 -92.280268 5.917835 100.380313 -168.706379 -15.883990 -53.894743 130.153199 -23.736800 10.967595 0.000000 0.000000 0.000000 +-4792.040383 -963.060064 -10544.676648 -148.341137 13.520434 134.793427 63.139208 -10.777251 81.802106 -96.286088 29.903807 77.546004 95.451350 21.239427 41.212391 6.887503 -13.619457 1.700331 -88.646231 17.829424 8.232839 77.421136 -19.248578 116.198223 69.904068 24.779019 23.696607 0.186809 14.838049 -9.670434 127.438953 75.677752 69.596261 91.576444 -0.838636 78.640379 91.779524 45.232679 84.601289 70.414298 -12.488304 30.872099 22.536841 16.021211 75.321101 58.182034 45.321374 55.441342 76.010705 11.367009 90.404980 -17.052263 -74.078984 166.975398 -112.048031 -86.575182 -130.842236 0.000000 0.000000 0.000000 75.323581 -14.290696 -85.241336 -83.940790 42.436819 101.837466 -171.952922 9.593026 -4.520859 169.795015 -36.007249 -9.230866 0.000000 0.000000 0.000000 +-6756.642831 -1636.733221 -9953.524439 -144.764412 81.235060 -44.460469 81.681520 -44.743695 60.398288 -100.331078 42.622500 94.160420 102.260760 30.974374 45.289990 17.986195 0.541793 -2.141689 -90.129803 16.371603 1.418656 88.858529 69.254598 158.734128 92.203073 27.382249 -2.195153 31.784756 14.867056 -4.153118 128.292166 83.085346 48.378136 88.748724 -18.792834 71.194406 90.614712 31.544187 70.293684 68.911435 -14.102258 41.971253 44.186842 27.650069 70.888748 29.122120 60.578297 32.823127 71.679886 8.491843 80.886534 -109.757641 -52.588948 -143.234119 87.587766 -84.580487 34.606450 0.000000 0.000000 0.000000 79.079481 -37.328746 -64.342870 -87.115495 46.328171 94.223437 -179.424227 41.897216 77.862804 -99.051854 61.373604 -146.275393 0.000000 0.000000 0.000000 +-6995.394940 -2315.953314 -11382.842591 -142.613510 -7.519398 127.882987 75.222680 -51.069382 47.469538 -97.851912 33.105192 90.697387 106.579791 26.488266 74.635425 9.006498 -2.391714 3.954421 -87.386611 38.267171 7.907994 -169.888828 88.039114 -84.647001 95.523963 7.371154 -36.269586 27.874791 38.624511 -8.541793 35.381048 80.852421 -65.422279 85.672616 -28.110501 68.823725 79.445184 70.012908 57.222352 49.118074 -43.443493 40.755333 26.943267 68.572331 42.213487 71.012350 63.594898 85.527628 68.610756 16.258111 78.773905 -94.253343 18.314492 23.879578 66.365415 -55.314855 80.029545 0.000000 0.000000 0.000000 98.156330 -25.670075 -67.291876 -76.780091 49.815156 100.162080 138.813775 57.895273 49.099730 -94.372845 19.575451 107.844903 0.000000 0.000000 0.000000 +-7408.543614 -2817.792181 -11152.205969 160.740687 58.865007 -153.233703 82.842648 -55.948227 46.052352 -140.461947 -31.211431 76.233921 99.365009 25.456776 70.460860 12.042750 -9.579471 13.136210 -87.212327 33.549276 6.819790 -99.555705 82.550657 -43.544359 105.623622 28.533741 -42.627878 30.084667 10.588286 -7.213598 149.411086 82.521393 32.457791 82.426831 -35.786315 66.983278 -62.742475 44.622430 -98.428259 
74.279258 -57.779259 48.910330 85.784385 64.077388 84.454298 82.651951 -13.426411 97.399094 66.578469 17.824045 63.094198 -96.215352 -0.858048 28.020831 132.034127 -49.966637 -3.634510 0.000000 0.000000 0.000000 98.254342 -56.093249 -76.553630 -108.731050 80.327482 73.725722 142.640294 84.595620 67.805045 179.001400 10.935119 -24.965458 0.000000 0.000000 0.000000 +-7134.603231 -2663.677435 -10779.247888 -68.261619 34.325074 92.655282 93.526694 -47.949201 25.905143 -98.818950 -14.318112 113.196357 96.836989 4.511551 81.876853 16.463021 -8.238471 -13.445342 -90.958344 36.394368 17.233611 -144.206827 78.461329 -102.030438 97.749061 33.451342 -72.087445 14.604197 6.481300 -16.801214 -109.534663 51.287274 117.248842 84.277038 -20.785780 66.870291 30.715868 -64.125143 72.934127 60.778461 -58.972679 44.115290 102.854231 -47.755941 50.801834 80.240692 -11.725615 72.180994 59.026270 15.930996 18.128018 -86.356965 -43.315437 21.766606 126.669453 -34.280484 -0.435834 0.000000 0.000000 0.000000 -112.620234 58.710968 80.187276 -85.539620 30.567117 58.926048 -10.567853 52.374156 -80.363739 -170.436079 12.163923 -22.840501 0.000000 0.000000 0.000000 +-7253.042143 -2740.342474 -10448.059129 168.070832 2.082823 -178.570938 92.781123 -44.284821 26.040124 -94.980591 33.263858 95.907694 99.068849 57.149487 81.338228 12.716584 -4.539698 -0.786427 -91.009124 32.030447 17.611766 112.803092 72.945821 150.265679 89.948739 19.130583 -81.481022 0.845855 17.414083 -21.918405 -90.726620 -74.022130 39.706387 80.817049 -7.558957 34.045945 40.403715 -73.770177 62.060364 72.240471 -37.516055 45.923086 105.968946 -75.898181 20.065040 67.591814 1.443369 50.502312 59.043142 13.912560 -0.324217 -60.131812 -70.671401 -9.705179 122.218027 -35.608003 -4.741525 0.000000 0.000000 0.000000 91.439112 -59.896679 -69.259037 -73.730656 48.976227 81.029802 -32.935317 43.006893 -78.887668 -174.632668 6.545117 -22.637974 0.000000 0.000000 0.000000 +-6407.857548 -2515.549261 -10338.369141 -7.878495 59.708969 74.963072 93.784077 -45.287467 31.945186 -104.332887 -44.542810 122.550651 99.632844 45.282843 78.825667 0.259588 -13.322984 33.770884 -93.991176 24.636634 15.692830 110.829615 70.762459 156.834283 96.673032 14.236290 -83.698315 6.319296 7.739750 -10.110689 -85.385863 -67.369845 33.723974 87.595856 11.305840 4.572234 78.984239 -71.373175 21.922835 82.364043 -26.522950 55.080613 121.202825 -77.596127 -79.576732 66.370983 1.950623 8.371239 62.239455 14.131070 -10.980368 81.325683 -59.521610 -144.499851 127.323080 -40.427315 -15.391505 0.000000 0.000000 0.000000 -30.974977 48.983453 84.205833 -65.385260 -1.340767 33.001981 21.115437 -40.641956 -134.096827 -160.972581 9.105699 -14.330058 0.000000 0.000000 0.000000 +-6944.928373 -2528.844915 -10010.933088 -141.699073 19.975660 -87.700401 98.006458 -50.063317 8.483754 -95.512051 -40.801391 123.984292 -101.070763 -60.330928 -52.013691 -10.190558 18.992062 -83.173216 -98.616567 28.886100 -25.749941 -128.221180 71.289570 -51.782153 94.743980 3.638419 -79.157792 -43.622297 8.729345 -95.051277 14.740800 -28.672269 -44.810005 86.937493 -0.958663 28.133538 56.731652 -73.245076 29.081387 73.549050 -52.284536 -6.640572 132.354065 -8.368178 -96.881424 77.911024 -34.001902 -2.312769 65.806479 21.150708 -11.069766 -11.087001 -22.119344 -90.710064 144.640810 -47.043394 -60.724983 0.000000 0.000000 0.000000 -128.328284 2.749915 -33.810960 -76.029421 -12.934178 6.974841 23.879584 13.240915 -127.767273 -168.241786 -13.960744 2.377396 0.000000 0.000000 0.000000 +-7114.584269 -2604.514349 -8960.733334 
21.213533 -57.090771 105.695596 104.230281 -59.746018 11.962024 -120.282376 -62.985343 -177.077394 -100.476103 -47.969450 -83.736557 -12.304274 -19.472028 116.726149 -98.043415 -5.932745 -39.196329 145.225324 43.965726 -164.550221 115.008330 27.890729 43.341317 -67.623167 -35.088106 -135.639137 73.358719 64.133126 -31.522712 92.794101 -17.614680 41.069896 81.772704 -67.931700 53.008068 87.472959 2.797591 -65.446402 119.029958 39.872491 -82.830046 71.251953 -33.246411 -10.699136 70.607346 23.946478 -27.298405 -1.136836 -46.906193 115.739911 127.830666 28.560196 -82.782080 0.000000 0.000000 0.000000 -99.328388 24.207235 -52.736823 -77.475557 -19.774208 25.782895 -10.119554 30.442580 -92.597422 -146.747835 -32.241040 11.300211 0.000000 0.000000 0.000000 +-5770.862998 -2851.895941 -8860.591573 42.306232 17.819811 106.738820 111.040188 -59.470728 5.315499 102.773729 -39.237801 -15.834364 -95.499226 -52.809152 -96.617301 -31.122295 -34.599313 76.324303 -99.161602 -7.056748 -53.012241 171.211202 7.309718 173.563969 132.005717 33.266604 53.509455 -89.117460 -12.164625 -115.442038 80.655538 72.469246 -37.227742 94.770507 -43.677442 60.948227 91.271122 -62.728528 43.008280 88.000903 -5.240225 -66.396485 126.847892 11.812045 -86.833262 69.586850 -61.841872 -24.506791 -138.265872 -0.699512 90.336807 5.176368 54.587490 24.939973 -93.830252 37.923253 41.327705 0.000000 0.000000 0.000000 -98.855793 -28.576058 -49.020677 -78.674097 -35.983582 -33.844350 11.951774 25.603001 -89.722518 -102.843768 -45.440427 -30.522375 0.000000 0.000000 0.000000 +-5683.622105 -3121.666449 -11042.210317 73.891320 -27.485390 31.285839 113.887818 -61.588400 3.992811 -166.861013 -76.269638 -134.173895 101.576019 36.156632 69.895593 1.459114 -12.893471 26.185552 -105.631621 -24.247446 -34.947898 174.572274 32.800212 140.245200 113.561455 -30.664862 37.221911 -109.129734 22.938323 -142.676847 85.387039 34.498087 -53.708727 97.521519 -16.474699 54.720182 101.356173 -59.720670 10.306870 90.002758 -8.797651 -62.274839 123.674075 38.649462 -82.523341 45.223375 -57.051796 5.356198 -147.591737 -10.350429 62.516909 -3.925139 58.750764 24.017886 -89.091855 37.418142 42.926542 0.000000 0.000000 0.000000 -85.836506 -9.686628 -33.847654 -140.678540 -23.076572 -3.371133 6.629084 2.386860 -99.736081 -103.394062 -49.676872 -18.212122 0.000000 0.000000 0.000000 +-4108.593173 -3393.726507 -10697.312926 -158.715348 64.274589 -155.150655 97.870324 -71.329070 37.238315 159.541381 77.014805 5.918929 101.351417 44.381136 57.199814 -5.518372 18.718639 -22.400421 -99.451031 8.559417 -39.900037 -166.153914 47.508033 115.928159 117.074937 33.956469 80.980346 -119.997586 50.422703 173.081850 86.758720 -68.926495 -65.708825 85.682381 7.243569 35.532509 81.599507 -67.317527 -6.090016 83.549386 -11.993541 -65.715307 -105.275251 12.919958 75.386345 47.875109 -32.005524 25.873299 -155.448409 -20.602010 23.217081 -66.366740 -63.280184 -98.473390 -87.652652 43.453858 41.669350 0.000000 0.000000 0.000000 -51.931585 47.222633 -5.676157 168.205995 3.066586 -3.651193 -82.706505 -2.932076 38.758774 -100.032128 -47.656157 -19.335781 0.000000 0.000000 0.000000 +-4299.039011 -3354.037054 -11859.775128 95.421531 13.846708 56.013155 81.295324 -61.099941 70.597410 -101.470739 -47.806276 114.147664 104.791739 65.322908 44.413152 7.320913 33.342109 3.761802 -100.906727 37.181438 -51.934439 -119.070023 -37.794928 142.096110 113.270682 -44.960875 63.319765 -6.556087 23.034938 -0.058923 -54.136877 -88.829937 68.857008 84.027276 9.033423 -9.616437 77.398621 -63.841588 -2.883845 
82.220383 -41.521165 -56.615194 100.367592 -3.096340 -95.268516 53.609553 -12.889747 40.291555 -136.823639 -18.261016 13.458534 23.287124 -58.947445 176.439961 -90.545375 29.302517 25.175328 0.000000 0.000000 0.000000 -54.597438 9.201371 44.818866 142.871750 5.950766 -22.733492 -60.887534 28.739894 48.515713 127.246539 22.089643 71.537481 0.000000 0.000000 0.000000 +-4282.430647 -3439.789984 -11738.481340 127.338342 -57.558593 17.985471 81.446787 -39.811296 90.468140 -104.104968 -29.166338 88.743166 46.135863 -14.082392 -86.745309 -44.270341 -12.486363 -20.424092 -71.401992 63.109723 -4.653550 139.502407 71.542740 83.882361 100.708006 -7.518165 45.566544 -79.544825 -47.147897 162.450495 -68.483287 -81.631341 80.953628 45.812298 23.275852 -32.760620 9.015992 -77.969665 30.151595 75.756879 -34.573570 -52.098059 95.995442 33.422413 -54.393975 61.116138 0.730734 47.998720 -137.841625 -26.099557 -0.474102 -51.756285 -73.644516 -127.293154 -94.362542 20.087174 31.018220 0.000000 0.000000 0.000000 -52.708445 0.943648 50.069873 140.787017 15.376379 -17.074973 -93.859757 34.968958 11.908691 110.733736 3.390481 26.592100 0.000000 0.000000 0.000000 +-4345.943484 -3517.393744 -11491.188282 74.465237 31.255942 25.894567 85.490460 68.382393 83.648333 -113.694829 -71.396358 87.053362 -20.303295 38.428033 -114.761936 98.699984 4.667149 79.584195 -72.092186 58.114822 3.601695 43.335115 74.622272 34.037527 89.022921 -23.533452 48.157114 -47.808697 46.014092 -134.979837 82.674667 -52.299638 -67.585819 56.337738 52.505063 -9.820623 -6.432373 -35.249987 75.325903 81.928152 -33.578272 -58.555785 91.859418 20.195273 -61.273031 67.983372 0.994455 51.833474 -128.179958 -21.974049 -16.589524 -96.378707 -78.034765 -105.698546 -92.373918 12.724660 19.312942 0.000000 0.000000 0.000000 -53.307373 4.475031 41.973785 133.470391 25.505278 -14.920301 -89.510961 45.626138 10.999815 107.021764 -8.278795 15.348867 0.000000 0.000000 0.000000 +-3710.590738 -3226.606146 -12495.024293 -64.832359 33.106097 -140.477007 86.029688 -72.460508 85.315318 -125.063312 -67.713143 87.503838 47.569568 -62.095829 -92.066331 21.466762 -48.940092 110.102534 -85.779330 30.639287 -11.386162 -83.569926 33.843643 -51.530500 86.649569 13.273959 58.106757 2.820456 5.346763 2.826479 -23.694504 -77.560267 11.884629 99.796766 79.532307 94.807919 23.318366 -10.368154 85.094956 84.000843 -37.662497 -60.434117 97.598021 5.348581 -58.071707 103.295610 -27.263877 51.343123 -112.429162 -34.050270 -21.198605 -176.683046 -49.352536 -56.029595 -96.679046 18.731736 29.539776 0.000000 0.000000 0.000000 -37.194173 17.496428 24.475449 143.461069 34.992263 10.294707 -100.770343 43.111393 -12.011175 107.106712 -12.767560 16.239161 0.000000 0.000000 0.000000 +-3117.232317 -3065.039984 -13920.112906 -20.923658 -63.703133 159.600145 90.243395 -69.858659 61.079567 -53.657986 -83.441342 -66.042155 89.324425 31.031083 66.014213 -57.198230 -26.183697 3.065537 -90.362531 35.909312 -19.704767 153.069151 80.732903 145.776230 89.909630 21.757598 72.498146 11.366925 12.447865 56.075687 -40.374866 -73.989096 43.887648 -105.033029 -39.104607 -108.413911 -17.689034 50.127153 71.056555 77.881322 -15.263355 -55.816663 84.610140 10.288169 -24.231773 89.587042 -14.082965 40.777825 -123.407594 -33.468569 31.707445 154.592951 -40.907173 -51.281010 -102.419751 27.674127 23.851380 0.000000 0.000000 0.000000 -27.942223 -15.441497 12.407448 121.787969 35.141294 -26.811740 -95.360247 18.163774 -10.029726 106.333873 -10.205529 15.289055 0.000000 0.000000 0.000000 +-2170.959168 -3164.417914 
-11727.915433 88.123366 -3.177281 39.009976 110.319378 -74.354534 24.592124 -117.491620 -1.338429 97.910520 67.151504 46.574183 -9.668247 -56.024249 -22.809608 -131.737248 -92.850729 15.361326 5.326454 105.735385 -34.730393 36.662830 83.085397 28.829209 71.709965 47.958955 29.547956 96.134446 -90.471835 -57.560196 47.531152 -177.695382 72.483230 173.090868 66.902514 -23.189735 83.314562 75.762717 -45.273861 -28.991528 97.842440 -2.659336 -34.300836 96.397154 23.346035 -0.103718 -144.642651 -49.674196 60.326091 161.197417 -38.883273 -60.503729 -106.248017 52.550649 63.379169 0.000000 0.000000 0.000000 11.314915 18.775200 0.011370 159.488770 63.550600 14.907852 -95.422276 11.691533 9.658525 103.168493 -4.941104 23.646161 0.000000 0.000000 0.000000 +-2210.638042 -3224.487005 -10827.553184 -105.497836 -63.072678 -27.413983 118.187075 -62.890546 -23.019620 -112.165766 29.713247 87.755319 88.444968 53.220309 19.969460 -133.632992 -34.970626 -84.009775 -115.401395 19.758542 7.614361 129.477510 -61.774028 -19.462438 77.931139 54.453038 51.081347 -17.655001 -11.138344 -74.095378 -99.429243 21.179757 70.500132 95.949304 -15.254822 -52.878788 -63.131280 -46.104814 85.065740 83.388649 -52.406842 -4.152075 97.465494 -0.110442 -38.748408 81.456773 6.719675 -45.029886 105.884507 -13.693405 -87.447079 -26.070565 -85.142881 171.200848 -170.901794 80.261299 -6.958486 0.000000 0.000000 0.000000 -13.573548 16.738766 -56.944999 128.386356 43.153175 -14.531360 -77.775672 16.505381 43.136201 110.003385 -3.183229 27.018605 0.000000 0.000000 0.000000 +-2331.442172 -3077.083685 -8725.098237 17.771423 0.779794 -14.592861 103.802238 -41.988044 -26.010577 135.643947 58.427913 -16.459568 102.703551 87.102701 59.462783 -30.240586 -32.648605 -50.856837 -109.997487 4.918045 4.688587 113.085211 -47.756515 -27.775552 86.891086 51.175989 18.844932 15.303828 -3.381011 -5.621763 -109.234867 32.977360 63.320893 93.282314 32.331188 -38.432548 80.880392 -68.756716 -6.497389 52.912065 -43.106293 51.501191 88.231407 5.125928 -50.878837 76.649715 -21.407377 -7.086208 72.944981 22.958285 -17.367338 26.131733 -28.724636 118.070245 139.349704 -20.757362 -11.430608 0.000000 0.000000 0.000000 34.733150 2.741977 -34.357268 172.300473 57.516301 27.866401 -134.770652 6.561568 4.737666 -115.645558 -16.781290 -53.248200 0.000000 0.000000 0.000000 +-458.074110 -2858.567572 -8252.992415 148.014880 37.397218 171.368992 91.394324 -50.942491 -6.170876 -136.833963 73.278443 77.205074 -110.758740 -29.503525 -75.176736 -82.378352 23.383969 -21.547082 -115.609169 26.125874 5.757945 124.963469 -44.761113 -46.825997 97.540999 44.250089 -27.041073 -8.832536 -22.441358 55.041902 -110.628994 6.923440 66.830777 101.275027 36.710022 -35.940467 86.832302 -65.841520 -1.352880 48.437294 -70.196277 16.455419 95.819337 -28.946688 -51.384750 77.111453 -30.978802 -7.508647 69.365939 24.373231 -8.410237 42.660472 -3.049385 107.748658 138.616801 -19.319670 -3.719684 0.000000 0.000000 0.000000 -29.120508 -0.420606 165.931383 134.984307 -7.168358 32.949281 -164.903875 59.848402 -39.557551 -124.125826 -8.754293 -17.136425 0.000000 0.000000 0.000000 +-2204.863404 -2406.513861 -10280.099423 -16.021821 52.750043 151.037261 104.082709 -50.651649 -11.687691 -121.720797 76.709160 66.673341 -109.282905 30.212576 -103.418815 -109.147479 -14.751765 -39.290128 -116.300089 3.286635 -2.431678 136.911720 -57.497995 -44.394625 94.907660 36.924577 -30.031050 -27.696739 -16.027819 22.810797 -97.916159 32.414757 63.517304 98.823759 41.598192 14.290435 86.399963 -68.475378 55.857191 
75.542983 -61.458233 -3.332822 88.784372 19.330238 -77.261311 80.714265 -35.643948 51.126886 67.102443 18.381043 33.632035 24.840040 -53.206138 114.719230 136.843993 -25.148412 -9.965107 0.000000 0.000000 0.000000 85.024262 -46.716557 -42.295752 132.562818 3.717410 14.689403 163.517913 1.155190 -97.964976 -108.194811 -13.092879 -25.971313 0.000000 0.000000 0.000000 +-1731.766042 -2065.557562 -12954.613720 170.298667 33.809907 123.906686 99.531072 -47.314481 19.053970 -100.423146 36.234836 99.434273 -103.424804 -18.770425 -84.437607 -107.649165 -30.217932 -33.386264 -109.621489 25.132241 -3.727470 179.815216 -62.462485 -32.582025 92.509891 18.943374 -44.718526 -1.082048 -18.083446 62.149877 -95.500115 -13.262208 41.960165 101.259151 39.059770 27.012485 80.989379 -63.014477 58.517564 78.995573 -54.272304 11.767025 111.503951 -16.970670 -39.516086 77.405543 -12.854577 84.910719 67.195458 29.020601 45.795922 48.273310 -65.385992 33.543050 139.366212 -26.174475 -11.022418 0.000000 0.000000 0.000000 86.851970 -35.554726 -52.264345 79.091461 32.802745 -80.911489 176.956768 -5.760701 -110.669253 -124.286006 -13.727825 -37.027908 0.000000 0.000000 0.000000 +-2309.598349 -1434.526556 -14116.414863 99.191509 3.409749 -60.281396 95.323420 -40.269928 41.222580 80.886004 86.240867 -86.519607 99.844943 24.724337 86.824145 15.700828 37.172419 -71.323540 -106.877059 7.916654 -12.089653 82.863155 -47.049397 104.152730 93.214809 28.263961 -29.660962 23.232586 -16.389989 34.889719 -95.548360 -10.048140 19.548698 84.516511 11.916880 49.839646 82.520679 3.705224 90.982299 78.713122 -38.692713 23.920930 135.158170 4.549091 -135.949692 82.841958 2.155871 88.216014 58.512176 27.110480 55.129422 -59.648344 -78.077246 166.521892 138.860800 -41.283696 -22.168024 0.000000 0.000000 0.000000 110.979161 -78.173490 -96.242965 -68.578486 33.765746 91.425784 173.811840 -1.549657 -105.710394 -162.377986 -18.781290 -7.329999 0.000000 0.000000 0.000000 +-2134.305311 -1108.355658 -14288.720443 -1.950130 42.374542 31.330824 98.897040 -11.497674 44.073553 136.382524 77.376814 -46.725629 103.391357 26.279996 81.761241 -6.756202 29.021114 -77.175375 -100.347317 -6.786448 -4.061335 -86.690763 -79.068484 -72.028946 122.171920 47.014759 14.272955 -50.477798 -20.220024 141.828475 -97.941578 1.340256 -7.592769 -72.396280 -74.541618 -62.939120 -81.666806 71.680993 64.934585 68.519138 -47.460429 55.542953 105.513572 -27.119167 100.719189 76.185300 1.771677 74.389232 74.760359 35.564002 74.312345 149.910558 -51.676187 -56.379863 138.172283 -36.216738 -16.366521 0.000000 0.000000 0.000000 -90.572025 -5.126933 71.569711 150.313094 0.023546 -26.708747 170.789270 4.893458 -90.315670 177.451931 -18.430901 4.836957 0.000000 0.000000 0.000000 +-1266.158003 -552.806158 -12499.723348 -168.113806 27.332048 -94.030929 92.427017 63.914762 23.671349 -54.000301 62.182531 141.178456 -11.926319 69.467311 -76.845822 26.853630 18.665504 -30.083555 -99.773313 -12.016874 10.915609 46.984062 -50.674396 139.032344 -169.890922 30.935482 64.899119 -66.045614 -4.440176 -143.228640 -91.503494 45.504632 -30.543261 -102.026948 -44.718221 -51.781134 118.832074 58.222640 -87.886908 -67.495733 -42.786523 -95.962477 97.920281 -64.541320 115.930261 65.826802 16.093698 40.171728 68.820123 48.410809 74.771037 127.752379 -70.669369 14.897289 136.029674 -31.247696 -14.452817 0.000000 0.000000 0.000000 -87.128556 -4.262058 60.779429 156.081641 0.026746 -22.518319 163.958011 -3.886974 -104.813644 161.753414 -10.777493 30.577185 0.000000 0.000000 0.000000 +-151.166628 
-381.989508 -14572.949074 -134.608184 -75.663634 -104.964322 98.468236 55.484761 49.312388 160.951895 86.818503 -27.614489 22.682748 51.843335 -79.634421 9.674999 34.484626 -47.638438 -98.466530 34.426616 36.251261 50.195146 -44.819167 135.493197 90.995360 -56.966933 144.788591 -88.425041 -2.663054 -108.110761 -90.486465 11.952684 -17.896260 -145.936460 -69.245809 -12.918734 109.705326 -55.136204 -72.929015 -80.939279 -34.364123 -88.628259 -58.421117 -66.276333 -98.364418 71.417605 11.393092 23.275156 117.990658 24.178817 79.869207 157.792654 -47.201327 -6.092239 133.944788 -19.817469 -2.618613 0.000000 0.000000 0.000000 -93.843488 8.207791 73.266789 159.319353 -1.507014 -39.440212 171.971189 7.380157 -83.417815 -159.541772 -20.707679 -4.690442 0.000000 0.000000 0.000000 +-1156.074402 -585.790777 -15459.243787 87.291153 12.816732 104.498221 -76.117601 41.988222 -109.907475 21.739082 62.866136 133.828946 11.169720 83.999376 -52.802499 32.556401 -9.811800 40.249411 60.219647 -57.962721 -128.832568 66.491931 24.257643 125.838003 95.804136 -24.050264 22.439716 -114.023919 41.731645 -138.978736 74.203291 49.777198 123.462032 69.833172 -32.292608 14.527106 91.675477 -13.787246 -81.860357 -74.800169 -22.013877 -70.987195 -78.110149 -13.382664 -48.490373 123.163870 -2.146258 13.173716 90.535663 38.083486 29.661732 150.774648 -66.242436 67.544033 152.795742 -28.859190 11.663569 0.000000 0.000000 0.000000 -95.427741 9.279623 70.032381 152.459022 4.616213 -49.231213 170.900648 -1.623053 -96.201312 -126.855027 -23.534124 -3.181665 0.000000 0.000000 0.000000 +-307.628152 -722.716925 -14970.898125 -114.700643 -23.645616 -177.607205 -77.949229 -11.264882 -99.809771 -110.919796 74.458124 -43.830439 102.768173 47.196314 33.225534 -7.193944 6.359849 4.269736 -81.172537 -38.457031 1.085182 87.359024 -37.149433 110.072680 90.171935 -26.427504 34.411101 70.875967 46.802208 52.786709 84.164234 -43.588176 -5.445909 82.597548 -26.804970 61.671129 92.229112 -14.925385 -55.718376 5.189162 -51.133223 -32.731562 -71.797793 5.783367 -54.609034 154.547898 10.820915 -124.459598 109.193197 2.554585 49.874768 -94.737768 -67.667694 -50.893624 144.661298 31.022386 -41.299226 0.000000 0.000000 0.000000 -105.567508 62.832400 67.548169 -94.694395 -6.091761 94.876601 168.547995 6.457824 -76.267081 -126.546186 -11.131787 -38.146942 0.000000 0.000000 0.000000 +-624.565133 -1052.759588 -15178.452402 -13.296900 -22.047232 -161.916093 85.437055 -16.421834 44.009480 -117.000008 60.008338 -56.782575 105.173014 21.122983 52.590544 -12.624359 -7.602929 5.473041 -86.325017 -40.922365 -34.216929 -99.445831 -69.077136 -53.009180 88.614488 0.491699 49.399021 7.855619 6.407746 -18.179979 82.826707 -28.015689 -25.833112 81.362255 -46.226316 69.558093 -78.614592 5.756131 -55.404237 71.812060 -41.518958 -54.450187 -55.104329 13.605538 1.202507 -110.098810 30.439280 -1.553326 68.782322 14.300618 73.876753 -178.750800 45.450454 -58.394152 -116.622293 -74.689287 109.306573 0.000000 0.000000 0.000000 87.295811 2.810100 -83.588957 -79.089896 75.090945 150.058654 132.623708 -21.135536 -67.949587 -129.783338 -39.124168 -17.601087 0.000000 0.000000 0.000000 +-583.121983 -1796.037298 -15335.812528 59.501643 -27.223138 -161.511521 86.708314 -19.536161 33.328497 82.375484 -70.017594 24.267820 111.946739 17.761688 73.978401 -5.137859 1.551507 3.998865 -89.195685 -15.459432 -36.995135 -112.752497 73.639827 -63.091148 89.258906 42.647066 3.050743 -2.164247 20.769519 -12.727702 -87.327796 -13.173766 104.154723 81.868966 -33.612111 69.269140 -44.177305 
35.714610 40.174212 76.307819 -43.220478 -49.319804 -118.978316 78.174947 0.046120 -81.906228 32.984171 49.327156 46.840372 15.910037 39.874467 -88.533957 41.409561 1.205139 125.178729 -71.293943 -108.690360 0.000000 0.000000 0.000000 54.195248 30.084136 -42.963766 -112.300068 -12.505666 76.492569 104.968299 -3.192806 -47.035941 -151.754691 -62.830140 -0.721518 0.000000 0.000000 0.000000 +496.928130 -2419.431097 -15969.170964 171.838931 6.141643 162.104788 96.795097 -40.624022 18.473024 100.048930 -58.164890 2.040480 108.693175 5.065102 80.278597 4.086292 18.548654 -21.430946 -95.630160 -6.178626 -46.073500 -174.119727 84.654309 -130.069141 95.114958 29.360328 28.402009 -0.980337 14.968668 -14.360656 -84.637891 -25.799546 98.763973 89.658917 -45.475447 54.469687 79.907834 -43.575096 65.562451 92.372520 -59.649076 -43.797689 178.731590 -81.697251 -90.511983 -41.020509 -4.999699 81.649812 54.111491 23.295771 64.411357 -63.370209 51.209592 32.904179 106.767693 -29.748893 -56.057006 0.000000 0.000000 0.000000 65.091645 -31.740140 20.070971 -174.851602 73.259985 36.061750 102.157048 1.693123 -21.924925 -114.060526 -37.352322 -27.305580 0.000000 0.000000 0.000000 +2518.621611 -3091.571722 -14517.384127 -84.307721 31.027371 -151.989620 98.766043 -40.402716 20.045067 112.315766 -62.339836 -13.383759 106.639446 22.785636 73.367862 19.656038 6.581791 20.107266 -98.568558 -14.489214 -47.827858 123.620604 77.857976 164.168682 99.008008 25.671514 30.926372 11.504957 53.468529 -28.333083 -95.040124 -65.683877 120.307459 83.868376 -35.581478 36.837045 84.141956 -51.517816 13.868348 93.479459 -54.147598 -64.826963 -140.155186 -50.273437 -107.684662 -54.172562 -12.479721 85.620225 23.795584 54.169663 15.999470 11.181866 18.757162 67.424637 113.329028 -16.454908 -22.551777 0.000000 0.000000 0.000000 48.783381 -23.315171 49.459828 121.324183 73.621007 -37.958499 35.175417 22.202082 -50.390819 -106.763874 -21.642110 -8.027850 0.000000 0.000000 0.000000 +-632.084770 -3925.835882 -14465.487518 39.364247 -77.528819 -154.053756 100.858295 -41.505727 17.661935 123.084297 -60.532541 -12.967584 113.445408 17.043743 75.415069 16.514844 -7.628708 14.252746 -99.529628 -17.910485 -50.054681 169.418582 83.606905 -149.025915 97.623749 2.999319 28.259463 -106.291066 67.911432 -137.591082 -93.643418 -56.356288 100.304153 90.460732 -46.354808 46.390347 93.364664 -19.737096 38.687925 86.826910 -66.379757 -46.027219 -141.139476 -79.716255 -80.829755 -69.676850 -7.229158 84.312326 2.576843 63.948419 -6.322112 5.429015 16.402213 74.312844 114.265791 -15.417220 -31.185330 0.000000 0.000000 0.000000 102.833730 -55.888397 4.560621 113.299000 64.834201 -29.515850 -18.367253 27.506431 -154.855025 -113.130214 -17.803640 -14.692149 0.000000 0.000000 0.000000 +1611.004100 -4127.969915 -15721.507097 -99.511486 -29.945085 -104.242519 100.494275 -37.698654 39.081533 119.934653 -64.826422 30.556439 113.529284 23.102378 70.442869 24.588556 0.405544 2.313641 -99.400172 -21.786510 -45.823262 -148.799179 83.485581 -108.979961 98.545631 -8.744850 32.141169 -113.093525 48.601044 -139.006002 -133.299370 -70.399203 136.209543 89.488849 -38.805089 14.562262 95.391706 -3.923987 -1.420864 95.996597 -61.910433 -59.128683 -106.668100 -21.464302 -98.804724 -80.179316 -0.224502 83.640843 60.400318 56.341032 -5.681129 -69.994118 11.218610 80.163191 121.811322 -10.638641 -19.693712 0.000000 0.000000 0.000000 76.527993 -53.430407 -33.326124 68.605170 65.051610 -85.678565 -39.928304 -4.636501 175.146996 -126.625859 1.873833 -5.094253 0.000000 0.000000 
[… numeric diff payload, shown line-wrapped in the source: several dozen added `+` rows of a plain-text motion file, one frame per row — apparently a root-translation triplet (large-magnitude values) followed by per-joint rotation triplets in the ±180 range, with `0.000000 0.000000 0.000000` triplets for channel-less joints; the slice begins and ends mid-frame …]
41.754479 17.275330 -5.855887 51.995285 -39.364175 85.884251 -29.728413 -23.841371 104.050005 -27.847170 -22.202947 97.076919 -38.592132 -4.993315 93.887330 -7.187509 -61.881218 141.687962 -3.752700 -97.520267 81.329304 -22.230853 -35.465753 51.018580 23.652831 -62.225210 -0.298244 13.950684 103.211066 102.095232 1.542882 -91.744117 0.000000 0.000000 0.000000 -67.768641 -17.590156 -24.861826 21.358602 24.559780 -130.944668 42.756550 -54.722772 -154.521285 -147.149345 -62.600291 -28.187886 0.000000 0.000000 0.000000 +-14768.014059 -3263.505316 -9922.043039 110.823535 -8.641853 -143.702727 92.640983 -61.754886 26.048393 118.293643 57.551181 50.216988 117.306295 38.797453 76.796509 -61.474684 6.762445 -5.805217 -95.610605 1.526551 -14.319935 -132.650086 -36.911702 52.546790 115.500934 37.026194 9.615036 32.368584 21.946929 -2.649424 90.075170 33.841640 -27.550420 99.345844 -36.998636 22.837762 93.592397 -53.201322 30.077951 82.433870 -63.456139 -79.106473 -104.334027 20.138239 63.353614 39.558902 -66.358906 43.841108 -113.745274 -51.988057 -39.492660 7.678384 10.163211 86.477900 -99.055181 -37.071434 63.772032 0.000000 0.000000 0.000000 29.345410 -42.437773 -39.226530 145.957978 81.381971 -6.404791 -100.403394 54.779024 -14.489346 -105.558562 -11.714543 -88.601895 0.000000 0.000000 0.000000 +-15493.347323 -4132.123724 -11350.399050 78.485283 -51.847592 80.258172 78.896579 -57.104453 45.520713 94.263587 34.636068 23.077032 120.448643 46.609887 76.867857 -56.267809 -27.531217 2.219040 -94.469907 10.129690 -8.510438 -109.458109 -12.785512 17.173367 115.917676 17.541906 -12.899634 3.248074 55.398093 -32.484242 90.085759 34.915385 -17.483129 96.855290 -50.253664 33.892826 88.351052 -48.094673 28.278687 -80.286109 -45.904906 70.701769 -99.668446 5.525972 52.562186 5.705003 -50.013816 79.814297 -119.959486 -61.500708 -33.308657 11.391271 2.411016 95.903744 -96.433103 -43.521355 70.145058 0.000000 0.000000 0.000000 7.338802 -46.979967 10.904830 20.222642 73.902854 -149.813807 27.902228 84.024556 117.514311 -106.862031 9.952451 -61.775649 0.000000 0.000000 0.000000 +-14639.953382 -4011.298285 -9245.935274 88.214540 15.493223 -72.460807 84.077740 -56.319295 48.976380 124.730674 -79.801005 -51.599824 110.252313 43.858838 63.525903 -11.427532 -10.088889 5.754650 -93.669989 8.792673 -3.432900 -100.406483 24.928620 -29.399173 123.035086 1.648320 -50.239660 23.339929 15.269022 -2.575224 125.407551 66.043120 16.683638 88.650457 -44.185081 36.551920 81.436423 -61.296689 -40.619794 -84.058081 -63.911758 89.383862 -128.774758 54.263868 -82.321573 29.052746 -56.893359 40.696858 166.549708 -77.822780 23.623086 -6.723821 10.150075 56.359225 127.032571 -10.564198 -4.756887 0.000000 0.000000 0.000000 -23.869949 6.400297 57.567270 151.418131 43.689517 4.580212 -74.728457 38.598890 16.015040 -162.426321 4.289744 -25.846211 0.000000 0.000000 0.000000 +-14703.127872 -3818.247259 -9380.758643 152.347171 -33.703652 53.146828 89.810456 -47.503368 39.507217 -87.776508 36.845911 92.911275 112.128872 41.890106 62.365970 -9.637556 -21.676080 -2.386971 -94.672882 4.146727 -6.833576 -108.754138 41.547816 -55.840091 112.596500 -13.232801 -70.874592 17.455501 13.474099 -1.535760 -150.714111 66.631073 110.011707 96.929923 -46.594445 41.804445 87.497283 -66.039980 -47.132181 -81.573975 6.021141 102.444405 -130.987148 50.331170 -97.488270 27.421689 -47.031928 46.611990 101.570258 -20.869534 99.520252 -20.295043 -23.083532 -27.421799 126.468694 -12.083501 -2.865558 0.000000 0.000000 0.000000 -32.225092 21.421816 50.806276 160.505070 
26.272267 15.997372 18.740361 22.793714 -121.757422 -159.038715 8.550658 -21.260225 0.000000 0.000000 0.000000 +-15312.383591 -3646.647894 -8605.067680 125.899153 -48.903690 147.305718 90.952297 -47.549850 44.942084 -98.395091 10.067751 68.658454 103.109532 1.580142 78.018685 16.535207 -0.666375 4.446126 -94.780220 6.993557 -9.353304 -103.039897 48.750269 -63.013300 106.447641 -24.940337 -105.173254 0.146205 16.616148 -17.379047 -150.272813 -76.659636 144.857426 81.913478 -1.351075 67.700411 -75.911365 -62.351072 83.769565 86.968226 15.188575 -68.403965 149.365527 -56.331375 -52.167196 46.521860 20.535606 54.012275 118.808452 26.199786 115.028293 -31.582941 -40.864539 -91.387279 128.955863 -17.392302 -1.841218 0.000000 0.000000 0.000000 -7.116024 -40.536277 -25.630394 157.542335 -23.655207 9.750057 -64.159974 -24.942818 -88.704055 -175.450915 -6.241832 -28.394747 0.000000 0.000000 0.000000 +-17219.667382 -3416.908392 -9451.790861 160.671688 41.285798 169.359558 95.914694 -53.004484 32.764586 -102.002373 2.193776 76.574674 106.109683 33.773907 79.864138 28.574426 -11.390851 0.937701 -91.013703 22.442322 -8.236098 92.619976 -18.709047 108.687161 113.323042 -19.950058 -109.618420 0.922257 25.782347 -13.611782 62.683874 -70.577655 -70.321899 -93.087735 55.391104 -94.630760 -80.613894 75.520433 31.102485 93.559908 0.916506 -65.390302 -162.668961 -21.797237 -99.120609 -46.098339 43.883223 -71.322405 -107.292560 -16.715839 -77.752770 -38.962284 -38.591375 -66.543829 127.372674 -16.544904 0.077927 0.000000 0.000000 0.000000 -21.073716 -51.131529 4.599139 161.935324 29.371255 7.729285 -110.299988 -36.883021 -38.792567 -172.064078 -2.289655 -30.723665 0.000000 0.000000 0.000000 +-17151.775216 -3054.059515 -8929.482527 79.707154 59.748886 85.639321 87.103637 -52.160386 55.821847 -97.190044 11.212066 73.202803 104.228574 48.814852 73.222722 23.482100 -20.477401 -6.402534 -95.779143 24.893082 -18.264028 98.939761 6.754068 109.994565 123.932342 -33.291088 -130.017367 -11.289578 34.147808 -34.662028 -102.269583 -62.818181 113.507219 89.113485 -76.087898 33.091236 -84.460430 18.305628 85.829188 90.900873 16.470141 -55.155018 -156.035548 15.293391 -122.341127 53.619184 2.095755 55.608242 139.073627 70.017073 130.357686 -61.792965 -20.706833 -55.239569 125.148518 -13.745858 -1.922692 0.000000 0.000000 0.000000 -25.003225 -43.812922 6.406718 170.228691 -12.297561 -6.614564 -28.494010 51.567859 -157.939315 -171.674093 1.032410 -28.461545 0.000000 0.000000 0.000000 +-17165.797280 -2626.158148 -10190.667023 -111.252858 9.136628 -77.013927 96.526590 -54.551765 56.341449 -100.077731 37.821109 66.177218 105.955705 40.193601 79.958188 7.137391 -1.734724 4.506988 -99.376227 12.515226 -12.981526 101.440499 8.575079 96.511758 124.648231 -13.555837 -104.719705 -12.240049 25.647746 -37.286121 -102.016942 -19.937689 111.404499 97.974050 -59.799601 -54.927633 -91.535681 62.751335 82.539115 87.482642 26.249731 -53.166284 -167.101502 1.807435 -107.050261 24.726195 7.776198 83.461908 168.116816 44.893997 84.704660 -62.306464 -16.832942 -54.912826 126.984354 -16.058380 -2.620625 0.000000 0.000000 0.000000 -30.630462 -24.872567 30.082750 -127.528488 -53.182946 -44.626361 31.857624 52.598166 -91.556957 -167.427523 6.062180 -22.619800 0.000000 0.000000 0.000000 +-18349.127819 -2393.520941 -9140.446959 32.896218 33.237951 52.044909 88.149504 -53.559036 47.523237 -99.858759 8.242414 54.582900 106.391207 33.886811 72.055202 9.003207 1.239556 12.271180 -94.915584 4.044128 -20.463218 95.659659 -32.542921 100.685080 131.811529 
-38.332424 -143.992438 33.105489 1.206206 0.217023 -62.615161 -80.958828 41.747921 110.189027 -0.148952 -25.586766 -90.978205 44.647159 42.196407 92.944881 43.109517 -21.816857 177.993030 -23.146607 -77.973390 9.950208 -36.242944 39.664720 96.322524 -27.571262 -76.378667 -57.034577 -29.243228 -58.161895 126.963112 -16.239674 -3.163835 0.000000 0.000000 0.000000 8.279398 -41.827525 -9.523774 -111.362618 -41.851652 -32.270893 28.734818 47.619673 -98.952099 -169.412026 8.959105 -22.797016 0.000000 0.000000 0.000000 +-17314.028904 -2100.455023 -11145.822616 117.122163 -15.053305 61.910640 95.890304 -60.189033 35.594332 -102.535789 11.562533 65.243077 114.152817 5.751946 75.843197 19.091307 -3.026654 14.378019 -102.319410 35.697823 -23.869531 112.526490 -1.980726 95.863141 168.999478 -48.845612 159.744988 -23.913047 -45.501668 99.906682 -84.597840 -58.066922 77.418141 113.585211 40.511521 -35.243522 -91.628453 32.325821 60.350757 92.337724 33.398152 6.926486 173.320264 17.058290 78.887315 8.275025 -79.832137 50.793570 88.215590 -8.043727 -70.173283 -68.224702 -43.498253 -58.157295 127.418720 -18.894674 -7.120292 0.000000 0.000000 0.000000 -39.212971 -52.883509 30.615980 -140.286407 -47.195155 -18.422338 40.765024 53.821503 -95.255133 -174.016571 8.277716 -25.115947 0.000000 0.000000 0.000000 +-18124.338412 -1943.685248 -10539.325960 -35.519989 12.866846 0.576771 97.572261 -30.031752 35.310062 -93.327108 52.878698 76.033268 127.140974 60.123571 76.549736 19.482875 0.739128 4.955539 -92.221235 -25.637906 -14.587026 121.796814 -53.701269 69.043013 161.279796 -5.164829 -98.111999 1.094675 28.280274 -12.038697 -92.711123 15.349244 76.964707 -123.500306 -26.800674 -99.240735 -88.694400 56.421363 -22.071105 102.701042 67.556054 0.918725 -155.413375 27.292916 -105.178338 -81.854549 43.200394 -140.657760 79.071723 3.395350 -39.656003 -39.471485 -43.947862 -62.148313 124.485626 -19.418948 -8.998068 0.000000 0.000000 0.000000 14.460478 52.077911 65.374715 -91.244658 -29.221743 -33.719006 41.618924 46.746821 -95.139771 -170.554309 11.654445 -20.545431 0.000000 0.000000 0.000000 +-17123.478847 -1615.020941 -10910.835454 -63.729840 -17.288922 -20.553834 104.695548 -45.626994 47.188117 -96.992767 46.286924 71.073428 162.894258 -25.185020 62.754059 18.818003 6.332362 -4.916024 -97.900338 -6.514668 4.120289 -100.734443 7.629933 87.288710 -138.814324 -49.933145 113.793442 43.730367 26.570786 -22.355558 134.753708 38.876171 28.830591 163.047542 -85.733802 -38.046580 -89.115287 30.010744 20.687097 108.716091 48.987362 -54.552782 -138.856665 25.531776 -84.562130 -34.342395 -54.158605 76.522658 108.313935 11.140499 -41.243218 -68.569137 -38.529369 -36.811458 126.765830 -19.081324 -5.687823 0.000000 0.000000 0.000000 -79.676440 64.743617 -44.149971 -108.535848 -31.431514 -52.089535 40.100958 30.753829 -83.067134 -170.053349 8.396586 -22.661063 0.000000 0.000000 0.000000 +-18656.196637 -1346.020697 -10855.241246 41.691220 -58.905374 6.702167 109.999436 -16.148435 42.299366 -125.361907 14.399033 -8.274507 117.528862 51.334579 55.953622 16.381592 -21.434999 -5.869750 -104.670828 -33.685266 -2.365057 -96.939834 53.767118 33.875650 170.809966 42.923940 -31.164022 7.071349 43.867902 -23.318915 121.537421 -15.582446 27.377847 -93.761659 -2.404932 -84.298166 -91.306138 -36.130971 -47.602361 -135.459687 49.900006 -19.807073 -144.199037 20.801113 -77.752610 -136.342735 -37.554216 -107.788771 107.831210 11.746510 -56.811726 -55.945654 -6.742897 0.814228 127.161640 -24.241626 -7.500802 0.000000 0.000000 0.000000 -75.723363 
75.748977 -11.669029 -107.364024 -26.313074 -52.600829 104.209478 -2.054960 123.398319 -168.580605 8.702498 -24.736891 0.000000 0.000000 0.000000 +-16449.617601 -1248.687323 -11604.802217 157.196133 -57.423058 -21.831077 -106.556260 -20.473970 -88.652006 -127.380976 48.196794 -45.537466 112.187942 41.230171 55.185638 1.254114 -15.808710 4.350542 -101.920706 -25.181422 -12.024441 -141.443572 47.844508 -69.756720 -177.512667 -10.617814 77.921951 -10.837808 46.230769 -20.074013 105.254518 17.411954 -19.063042 165.135563 -55.461594 -89.045921 -122.348200 -30.076400 13.185917 -130.559669 3.959157 -56.103555 -123.786050 -19.655058 -52.471059 -45.273720 36.179665 30.526471 96.162587 -21.467910 -51.812358 -82.418193 10.989279 22.444274 122.666166 -20.129838 -9.774045 0.000000 0.000000 0.000000 -83.321181 46.448964 -2.955309 -101.041238 -27.586367 -51.857507 -154.050894 -68.950126 -26.789854 -172.721765 5.502550 -26.256449 0.000000 0.000000 0.000000 +-16813.210316 -952.597296 -11961.567411 -138.038688 -8.832493 -158.947212 138.904562 -79.181660 17.410051 94.049015 -22.929413 -20.970109 94.677419 79.959077 33.406900 6.509250 -22.801000 14.105605 -106.202378 -50.316818 11.687814 -93.502718 48.442302 39.510933 -136.613449 -45.460716 90.303416 -7.731130 51.240748 -28.590529 108.668110 -23.119665 -16.172142 -80.500516 40.752000 -82.071887 -85.658625 -19.297344 -36.513688 91.696839 59.128612 -69.265770 164.261416 15.928062 -172.653153 -42.970573 -4.850177 -106.351980 99.074409 7.738918 -70.690804 -47.152093 -57.490706 -129.684114 116.742068 -25.757563 -23.564001 0.000000 0.000000 0.000000 -107.474812 -30.814070 -28.358220 -94.817550 -23.105126 -58.930390 75.317057 -33.515518 -161.490140 -178.325871 4.322101 -26.287331 0.000000 0.000000 0.000000 +-17390.985012 -992.069525 -12299.887621 105.427516 2.385459 -178.524925 -99.083205 24.777355 -82.243300 75.914919 -42.307427 -16.970213 108.233539 46.079913 74.612930 -2.076158 -23.201132 5.960827 -125.020504 -51.634932 15.970702 -122.034572 41.486257 -12.831728 -161.013124 -7.674767 84.696654 -17.129779 44.492049 -23.897661 89.594623 1.540278 -9.017296 -92.719675 -12.875060 -66.135019 -90.721540 -27.356712 -60.329762 99.427410 57.087098 -75.799983 167.304134 -34.086378 73.319696 87.780821 42.546886 30.238388 -101.895162 56.451880 57.452366 -42.729610 -55.178709 -172.525135 101.628934 31.753862 -68.874266 0.000000 0.000000 0.000000 -103.994676 -12.984129 -40.932024 -85.249844 -19.374129 -55.780449 52.594979 -27.011875 -117.596482 179.403620 2.990183 -19.105887 0.000000 0.000000 0.000000 +-16253.876720 -868.843145 -12629.604393 -167.821747 0.844704 -165.180054 59.251200 -19.604200 -111.708760 77.883090 -30.744466 -52.704001 -81.136162 -66.159705 -6.588564 -17.252361 -37.124173 5.532522 -104.679707 16.577080 60.525526 -107.838980 -50.847514 91.373805 -119.218663 -21.294305 89.138289 -22.743921 59.295095 -9.285458 88.666562 -25.826155 -2.398611 -97.374267 -39.105534 -35.786475 -90.146161 -12.239374 -58.084300 90.149932 36.375190 -78.117147 159.127994 1.212090 83.678489 55.320861 55.089547 -33.702746 -82.619316 42.634824 53.921626 91.465075 -49.166085 20.102381 104.325437 26.580424 -53.851847 0.000000 0.000000 0.000000 -99.013243 -6.389090 -48.523493 -82.717937 -17.878503 -57.243353 57.649054 3.012069 -106.864313 -164.656024 17.261458 -17.130658 0.000000 0.000000 0.000000 +-19118.744594 -911.459478 -13851.381324 -69.439739 3.777037 -167.579361 86.905383 54.499914 -149.775600 -77.082423 -14.069245 125.021711 -85.545728 -56.341322 -8.921135 105.500755 -13.710819 
-106.922815 -125.919452 54.066782 51.292034 -128.591005 6.516130 92.365519 -164.007421 70.637971 38.227084 -139.582967 -0.266167 -87.771990 93.663020 -31.915240 10.569483 -95.736164 -24.523968 -38.553842 -91.072334 -12.927952 -56.115848 91.007199 42.425078 -75.796684 126.030361 -12.148108 88.310512 70.125148 33.142682 -36.060628 -85.968300 41.649646 46.795408 36.144118 -35.203849 108.724344 108.469427 29.513940 -57.351241 0.000000 0.000000 0.000000 -106.445369 -11.403357 -40.873743 -80.651761 -21.130233 -52.463894 60.688876 34.713856 -107.469959 -152.553035 23.880952 -11.104804 0.000000 0.000000 0.000000 +-19211.279733 -621.617682 -14619.802992 20.733711 30.960189 -166.868385 132.420411 46.251001 11.192790 -77.041449 52.712332 109.376607 -93.345049 0.508197 24.816079 -100.270679 -38.031684 41.706020 91.540622 -3.630029 -83.476876 -122.039857 38.846376 125.558765 -130.959364 24.731064 77.306722 172.576174 -64.386012 -123.887809 94.779689 1.820366 -0.306278 -95.730755 -36.555867 -31.325094 -89.914229 1.803644 -52.089754 87.696838 4.029252 -62.818316 115.492477 6.505310 86.853371 84.383450 -28.297508 -32.681653 -7.857946 80.140198 170.593359 -13.160788 -37.593921 -113.475208 126.459586 -10.869895 2.194855 0.000000 0.000000 0.000000 -116.333660 -5.516266 -37.976249 -72.941870 -16.600238 -56.057196 57.619525 20.663103 -112.060692 -142.899550 9.720043 -4.114634 0.000000 0.000000 0.000000 +-18793.998357 -626.102789 -15398.618178 145.413702 -27.659864 -132.267263 110.495170 21.231276 7.330320 -59.132715 17.042892 122.945993 -102.788977 16.477204 -16.420574 133.660967 -3.517810 122.301734 -91.464212 -25.370846 85.501388 -18.405314 -67.593978 -173.309982 111.937523 21.085873 -33.365638 176.158786 -77.061846 118.289229 168.009689 70.635808 5.838653 -97.646664 -42.338001 -13.361324 -94.225973 44.872313 8.436937 84.869524 -20.202690 -41.239865 123.663686 64.905446 84.535377 67.813644 -56.094613 -18.241294 70.612962 16.523822 -47.225906 -19.730258 -56.978483 -92.470180 127.326183 -15.894381 2.097822 0.000000 0.000000 0.000000 -125.682802 -7.798106 -28.298219 -71.029411 -15.664823 -56.855865 19.726123 62.206819 -115.127179 -144.162061 4.430314 -7.722395 0.000000 0.000000 0.000000 +-19872.885884 -449.730597 -15455.979088 -93.603114 8.529992 -3.645452 97.515814 -21.354659 18.670050 -82.936326 5.469402 89.304721 -122.975741 -13.870869 -86.418609 1.149810 13.615958 5.116084 -93.154189 18.277433 50.047018 78.694580 -1.910969 64.922384 110.733946 -2.253777 -25.992345 3.870836 -17.252825 0.451571 -90.699230 43.180852 70.305649 -99.975232 -54.593782 43.571857 -89.370701 25.183624 76.328480 81.857607 -36.992778 -27.973143 101.659078 37.617258 53.212406 90.129181 -39.837814 -37.387648 104.946059 20.182390 -64.247331 -28.717788 -70.412048 -84.138208 127.251749 -14.356244 4.079052 0.000000 0.000000 0.000000 -129.103529 9.499049 -33.488178 -144.026678 -72.385464 -27.810493 7.398408 59.585695 -112.575534 -152.045058 0.174316 -9.742966 0.000000 0.000000 0.000000 +-19102.867529 -599.743414 -14228.979234 -109.258792 39.006250 102.377188 90.317074 7.629710 18.827286 -83.305752 -42.060638 98.031344 -122.990672 29.460090 -111.755894 33.076219 27.042513 -13.230945 -90.085348 -22.408958 21.607725 87.406159 -41.285749 83.469511 108.387547 24.901406 -77.587373 4.670276 6.424141 -20.591056 -91.297979 63.009109 70.751263 88.046117 41.494607 -102.675067 -89.180620 29.632042 82.627960 85.432507 -43.777811 -26.600972 119.189217 40.704117 40.394447 108.113150 -0.505729 -43.380110 103.898653 18.558235 -65.602033 -37.209889 -58.967155 
-70.896344 125.585630 -12.058805 4.744873 0.000000 0.000000 0.000000 -99.133072 14.180499 -20.057034 142.859427 -31.876122 41.925873 14.168108 59.859435 -103.001121 -155.014238 -3.959125 -11.113464 0.000000 0.000000 0.000000 +-19994.375059 -350.443365 -14582.049695 -27.953369 -41.707031 -49.975353 86.329762 -10.702892 9.810397 -113.018784 5.638391 -73.758769 -120.240741 34.078588 -112.558843 179.644313 -34.966877 70.583642 -84.875289 -72.968662 -34.091120 110.071975 -66.888144 56.194060 100.662007 -23.476344 -57.662390 -20.035306 -8.145591 -23.698324 -84.719610 59.620165 93.629730 87.397745 22.105469 -79.897282 -87.910960 -15.656723 92.195537 86.273140 -26.911174 -7.695319 112.295281 4.569449 18.023745 96.913935 5.569390 -57.331163 105.291074 22.855738 -63.212749 -44.916566 -40.339524 -80.463228 123.574592 -10.164297 2.514656 0.000000 0.000000 0.000000 -83.976219 3.102576 -21.744537 150.098881 -2.881136 43.766965 39.050314 32.245022 -102.578815 -162.474472 -16.558826 -0.973686 0.000000 0.000000 0.000000 +-19953.249593 -91.379393 -12554.608462 -151.925681 -46.436732 -61.902096 88.301587 43.009459 -23.784520 -112.022878 42.776227 -73.036578 -111.213493 32.554087 -98.510201 -174.170258 -19.157541 -4.458349 -82.208309 -86.820034 -60.643854 -126.173858 -2.648721 177.145788 98.179863 -0.414428 -105.280091 -154.623719 1.766177 -59.993676 -78.982446 20.116708 96.957585 90.815247 23.336435 -72.077607 -85.628991 -68.000862 92.324232 91.087030 -22.122374 10.136593 121.999702 15.789647 5.511553 92.849544 -0.745714 -52.616011 -116.610432 -19.807213 101.210385 -47.001329 -28.730485 -82.469419 117.280541 1.772841 4.094711 0.000000 0.000000 0.000000 -69.591412 20.199910 -13.354820 152.002199 4.266917 41.015112 48.673229 20.434934 -104.654750 -159.731158 -29.281915 9.887982 0.000000 0.000000 0.000000 +-16890.461063 133.157724 -12131.633250 67.559434 -9.357412 24.377766 92.452831 37.218878 -36.227678 -169.965113 -34.799878 102.045201 -129.013899 42.630937 -60.285219 -168.367170 -21.305550 7.300929 45.501245 -76.690672 160.889863 -119.671247 -15.515951 130.519728 16.703858 -53.194904 74.750884 -164.650278 38.132633 -2.592627 -80.771396 -4.254009 95.088407 93.182488 21.181621 -70.541736 88.678006 21.656753 -77.300716 -155.918718 -80.940056 -16.795791 112.218254 38.924358 -65.583340 90.497034 -8.215673 -51.061535 142.234857 59.474067 -21.905507 -52.380875 -25.120248 -87.442606 -82.567072 -65.087279 44.861296 0.000000 0.000000 0.000000 -85.115993 20.052588 -21.050958 157.573961 5.479568 55.370488 59.758703 11.472080 -120.727081 -71.141303 -53.741481 -78.519616 0.000000 0.000000 0.000000 +-13997.158644 308.633661 -12019.946708 -93.183083 52.938660 -85.898271 90.328693 43.018995 -50.552815 -126.088894 -28.885044 60.384526 -136.731450 35.081931 -81.726513 -160.575043 -17.837271 0.152215 44.055493 -67.513488 146.250626 -120.838629 18.310448 130.391636 44.206310 -69.156348 66.746832 164.138491 22.733556 -27.969428 -81.124995 -9.718152 96.263779 93.319669 31.304883 -67.785211 88.384872 31.315112 -68.355449 -96.515016 -78.995706 -78.019842 101.236239 11.359909 -90.646326 94.158020 -10.586989 -48.822804 -175.709635 64.394032 23.083312 -61.213945 -23.122168 -88.379567 -84.848744 -61.598432 48.541806 0.000000 0.000000 0.000000 -94.293164 25.737643 -29.686927 152.331244 -6.527757 58.857896 61.302960 16.188085 -121.310982 100.443527 -25.485141 73.126178 0.000000 0.000000 0.000000 +-14258.780670 708.915155 -11133.724699 3.775204 -35.888190 87.981711 45.774657 77.258995 -95.816773 174.335462 6.476105 152.857136 -150.888624 
70.763565 -55.466332 -167.955248 -29.610316 1.001223 80.804603 36.788150 44.361461 104.013913 49.008275 41.694003 24.981878 -61.049734 92.896568 142.952371 -1.340514 -92.252972 -85.757345 -14.817339 85.564013 93.360975 -2.510327 -86.128812 88.690588 20.264195 -85.154542 88.024934 -18.172459 -62.408698 -124.823813 -85.624786 82.679523 98.952100 -20.415944 -52.561032 -139.814916 52.018040 58.262766 -72.281295 -43.145116 -81.080118 13.390900 -67.381705 -52.137932 0.000000 0.000000 0.000000 -101.881536 21.031638 -47.626015 -56.401303 -28.598917 -77.694846 63.385967 8.400878 -125.283970 -77.651113 -25.922804 -93.236386 0.000000 0.000000 0.000000 +-12931.940258 1253.552423 -12945.768680 169.084018 -19.337438 -131.002747 49.788978 78.217116 -127.358409 -108.616585 65.655414 -95.115964 -133.478255 77.868954 -51.788410 -168.167144 -16.810728 12.197786 79.297294 40.170875 22.474133 87.177993 38.834819 55.418977 22.628701 -73.677781 89.854726 137.081678 -30.830380 -144.672568 -89.133188 42.091610 80.628764 -96.033541 -52.363814 89.756220 -89.329596 14.185872 89.895310 94.153450 66.666932 -84.773497 -23.182040 -76.573789 -73.556279 93.882315 -15.830581 -43.449271 119.692553 58.309153 -25.533735 -58.841796 -65.959017 -100.034629 58.591769 -41.793901 -89.421572 0.000000 0.000000 0.000000 -118.107380 7.535977 -26.291207 -48.502573 -28.032977 -57.857979 65.995131 -4.995864 -130.755986 -85.962329 15.597958 -101.167210 0.000000 0.000000 0.000000 +-12523.256488 1646.370417 -11786.393256 -1.285778 19.102246 137.156077 43.919451 82.857444 -145.634764 -93.671298 -6.330907 -51.163182 -153.651492 78.858959 -66.884729 172.890795 -34.696864 78.160837 79.375278 53.530771 37.999954 83.837765 38.404533 46.413818 8.181259 -38.664736 91.342698 149.752356 -6.393575 -119.728678 -73.569072 68.455314 89.354279 -96.237049 -77.322313 88.322176 -90.474853 16.934722 82.651709 74.596070 -66.331234 -25.300571 -42.120283 66.654564 -81.903453 94.526303 -19.826595 -41.570168 95.295342 46.678746 -32.594254 87.485733 -71.591465 132.247727 63.034580 48.770273 -72.722934 0.000000 0.000000 0.000000 -106.597388 19.864075 -35.580681 -39.466579 -21.848397 -63.178791 62.698104 -15.516587 -126.230423 -85.774645 -37.081800 -86.188998 0.000000 0.000000 0.000000 +-14401.462455 1709.739557 -14728.227396 -34.944772 41.401801 93.331147 -87.568098 -76.390975 38.223514 -95.548196 -4.559634 -38.241527 135.953734 55.203504 -100.455890 -162.602588 -3.755193 11.535937 47.079923 69.906206 7.684623 84.449101 22.481775 54.520587 39.375996 -55.061289 96.480073 131.268147 -21.408675 -168.858153 -83.334911 70.376247 79.397461 88.748007 -68.113819 -90.901315 -90.075618 8.022455 85.374070 159.142140 -19.398666 -70.080283 -45.417708 65.831654 -81.357045 94.166318 -15.519682 -42.040515 98.842034 39.993876 -32.082803 45.165264 -77.163263 177.134709 82.508907 46.553129 -57.380431 0.000000 0.000000 0.000000 -109.362717 21.142888 -22.946289 -32.729218 -26.876174 -62.201638 63.854825 -23.248037 -123.724681 -90.035707 -60.355026 -69.683312 0.000000 0.000000 0.000000 +-15551.742281 1258.510675 -12634.641237 -177.843764 15.544580 -59.028959 -76.799466 -81.569843 40.645104 -112.435850 57.395907 -53.194454 111.057115 44.076430 -102.036528 -167.523665 10.003098 67.427577 60.193768 62.722477 40.273025 85.369753 -31.185663 41.042568 37.832480 -68.125126 78.171657 161.984356 -6.353318 -174.722270 4.594414 88.061688 175.072032 80.862988 -67.979634 -87.694560 -90.089630 -11.761520 78.501831 176.833335 68.507802 -98.334687 -64.349374 -1.106803 -86.785850 95.308248 -17.031829 
-48.245335 98.385337 36.354810 -37.108729 0.221849 -66.096021 -109.278671 -97.249502 -42.845627 9.581048 0.000000 0.000000 0.000000 -88.666113 22.285928 -9.985618 -37.447967 -22.161619 -71.463241 51.693432 -62.309576 -115.259439 126.355205 51.796720 40.478432 0.000000 0.000000 0.000000 +-13704.263026 1011.207086 -11851.474378 177.232079 -77.990889 -70.110026 -93.084113 33.538864 58.529697 -132.771340 45.452073 -62.671534 130.730542 79.676112 -75.484125 -160.171247 -16.638495 64.485544 72.755414 55.764810 19.501786 84.088001 -46.780894 17.281304 -17.946983 -72.541652 122.012734 178.823770 -8.876800 -150.613378 121.242775 88.492040 -88.851100 -78.083728 -77.755007 65.194568 -90.571221 -14.005165 69.854976 -155.941872 71.856278 -95.373754 -63.531787 6.963215 -83.048311 96.054546 -11.638255 -50.730541 105.996080 42.439623 -40.391123 16.994423 -68.399372 -128.794432 -94.156540 -27.746975 -2.432658 0.000000 0.000000 0.000000 -87.699702 19.734020 -5.802319 -110.289109 -53.725539 -66.306358 -73.686896 -75.546532 -4.374984 107.145188 26.485969 12.913268 0.000000 0.000000 0.000000 +-12870.014469 824.299433 -13507.170970 -19.077885 6.645001 106.643675 -79.394289 83.112233 82.442342 -165.696605 -76.031984 26.041044 92.101040 63.074464 -103.315283 82.580979 -39.747245 -35.388177 81.259322 38.186872 12.193644 93.918026 -50.571497 -7.337698 123.318871 -61.131544 -77.884071 168.564384 1.611487 -173.341096 114.526208 80.466340 -103.977733 -83.931338 -87.577010 74.251593 -92.308497 -11.434934 77.651760 -147.137017 74.699326 -116.791422 -77.117939 -23.859966 -60.061644 83.999016 -21.121506 -20.312498 96.981031 19.589672 4.229044 71.906238 -59.398769 -135.456401 -93.822439 -20.859107 -5.591456 0.000000 0.000000 0.000000 -85.527027 8.388777 -10.152452 121.793712 -22.736317 80.807622 -105.730513 -21.522972 -17.433603 100.498033 19.484989 2.440833 0.000000 0.000000 0.000000 +-14387.922487 782.903009 -14301.387653 125.178925 -56.420252 -79.383716 -45.336038 81.921582 119.034225 140.012419 -63.842802 35.005140 64.237088 86.787345 -146.303832 55.614433 -45.848400 -7.028899 82.202593 13.106952 1.938852 100.068565 -45.830170 0.719959 106.028358 -7.001017 -103.223432 176.481116 -3.870179 139.646926 94.076663 86.295979 -154.226798 -95.268959 -37.263590 76.378731 -92.833116 -9.245231 63.921358 104.807568 6.025879 121.243497 -77.189365 -41.700279 -44.089764 80.723539 -29.725034 2.485595 100.738393 3.109799 16.861746 88.074799 -55.191583 -136.614089 -96.428834 -26.661884 -4.476773 0.000000 0.000000 0.000000 -84.564883 1.368329 -8.947623 127.956038 -17.519762 78.251444 -109.213232 -2.012826 -32.859945 100.284085 16.323105 -3.255917 0.000000 0.000000 0.000000 +-11847.635183 960.126642 -15957.481370 -105.431651 5.963520 71.543952 -17.041795 86.337667 154.482043 96.066342 -57.926054 91.519133 -79.487186 63.712039 87.457743 -82.911349 -54.085534 115.016642 86.107375 34.085784 3.078998 91.433639 7.751710 19.879031 100.947467 -38.381568 -78.695764 133.803222 -12.360775 152.005142 -179.486693 83.593153 -87.037873 -97.682862 -39.357207 54.445143 -97.630288 53.690145 77.259417 110.333744 -10.789737 115.760310 -89.000705 -47.845966 -46.380960 78.088307 -3.709617 -8.946239 94.230005 -14.709557 33.082316 140.538869 -56.740123 -166.596974 -100.648414 -50.859071 4.555248 0.000000 0.000000 0.000000 -90.528530 10.305571 -15.322765 133.994138 -2.353966 72.337524 -119.645312 -4.515214 -35.767978 101.993683 39.243997 -32.443422 0.000000 0.000000 0.000000 +-11064.226958 1077.066828 -18518.455816 93.067173 -39.715571 -103.688399 26.951479 
85.398490 -153.322104 87.559046 -33.946722 102.572062 -73.179900 58.437856 83.303971 68.889599 -35.312468 -33.213796 88.545244 29.363672 5.801880 92.459456 10.873908 26.412009 93.828146 -24.888460 -92.324797 142.775752 -42.094505 151.506043 -171.924552 85.060349 -79.551727 -96.519340 -40.948747 45.812384 -93.615583 44.887979 74.233630 103.738755 -19.625510 92.362045 -107.469025 -56.462466 -23.848933 64.000375 26.125901 -7.251942 81.298431 -22.360194 63.771986 169.253385 -56.176102 175.000786 -113.253495 -70.276348 9.953643 0.000000 0.000000 0.000000 -84.499308 1.309250 -10.207879 126.402613 -18.688486 69.329505 -126.591326 -4.544253 -36.460965 99.427727 73.996231 -73.321826 0.000000 0.000000 0.000000 +-10578.011454 949.597528 -18157.687194 -146.337931 -6.599247 129.177895 -170.451107 52.547852 57.430143 88.302717 -3.437506 97.127373 -80.037529 40.586995 76.363773 176.263764 36.383244 -123.998733 83.502012 57.272612 18.861996 94.336231 -17.414241 49.835696 145.018024 69.932866 -47.355644 161.928764 7.102686 -80.679892 114.367256 -83.874009 16.110309 -94.546411 -23.088994 40.461786 95.195832 -19.902617 -74.986473 102.765895 -25.576043 90.578920 -92.371556 -41.596690 -17.397699 81.618568 23.933944 -71.896548 81.362091 -11.321612 96.290298 176.966659 -56.840230 132.316225 129.010094 75.810299 -2.802276 0.000000 0.000000 0.000000 -93.314517 -3.301793 -20.564706 152.539126 -9.314637 -42.979757 -135.655561 -7.718179 -42.071932 -85.101804 -4.897065 68.864502 0.000000 0.000000 0.000000 +-5948.152518 1018.835688 -19919.559543 -69.311451 -32.836483 2.310818 157.109131 -74.793696 52.336134 89.974901 -11.565708 90.944996 -79.119743 54.204290 87.379671 127.250665 -8.336285 -161.141461 82.434535 71.231942 30.885681 95.159247 -22.147458 72.463570 100.159474 -3.054828 -93.137478 145.928468 -18.598974 178.834880 141.322433 -61.361511 -36.612627 -92.532762 -19.441754 36.069027 92.294142 -6.639526 -62.306868 109.117634 -18.629996 92.192448 -89.932809 -44.148940 11.094919 129.624674 49.078557 6.584015 74.982817 30.748785 84.911376 166.752377 -59.732351 142.020927 87.128470 72.952952 -56.907700 0.000000 0.000000 0.000000 -99.128912 2.790276 25.507249 157.782868 7.730948 -16.250781 -125.521821 -2.724868 -35.116290 -93.854068 -24.224630 34.574240 0.000000 0.000000 0.000000 +-7134.453791 374.302942 -21915.121700 1.891926 29.707801 -66.686017 -114.913668 -83.016591 3.664314 97.423756 13.612798 103.540848 -77.845438 9.414090 86.350284 126.435038 1.655109 -131.059483 86.830474 59.931966 30.761498 104.532441 -38.789578 39.342501 -176.845328 70.270380 -18.639056 -175.260552 15.926445 6.171860 134.833859 -72.908003 14.764079 -91.055586 -21.017742 31.073087 92.628519 6.760328 -54.995058 102.777576 -26.142397 77.318468 -99.043592 -45.330193 24.434901 136.214292 60.587856 -33.727734 -126.922164 -51.390335 -42.274417 161.934315 -58.074056 135.399717 136.000925 11.832539 26.568113 0.000000 0.000000 0.000000 -95.842551 -2.285908 7.108882 164.933722 -18.860645 -16.128774 -133.545425 0.471494 -34.680346 -168.231332 0.619101 -28.245885 0.000000 0.000000 0.000000 +-7329.089156 588.924921 -22146.885670 -105.913345 35.472796 139.170594 178.991962 -47.375651 70.336441 138.304970 -72.667694 90.738987 16.271631 80.425764 -141.635466 106.818974 -47.864663 -159.724870 96.660525 64.965619 53.856256 101.739489 -13.996385 63.888549 136.637237 45.930472 -76.569038 166.332977 1.576738 20.062803 91.969490 -53.594142 18.681541 -94.034389 -32.482823 36.354756 91.516847 -11.951870 -70.182278 123.151599 -19.172661 104.375951 -99.844185 -42.627452 
47.460956 93.457105 32.898662 -28.398210 -160.823464 -57.676268 -0.440455 149.486553 -62.545972 155.123277 132.913661 -2.200910 10.654510 0.000000 0.000000 0.000000 -97.764925 25.677723 5.049143 -179.199067 -18.816496 -16.513247 -150.402565 -28.478125 -20.891703 -151.560352 2.248347 -29.753996 0.000000 0.000000 0.000000 +-8271.468821 180.662867 -23769.660563 68.521228 18.605700 89.266472 -134.427006 -67.513852 5.748983 -92.302680 -16.376716 -16.073647 123.395823 57.734190 63.556393 6.048881 -41.607836 1.613942 99.147347 -47.487350 76.297885 101.057803 -27.902039 62.622845 -176.813685 -25.758756 -73.113142 -67.755224 37.486756 -164.305891 102.782522 -41.335378 21.835738 -101.591152 -23.260079 25.176291 90.333637 -29.545788 -67.192228 123.822486 -47.716229 21.976513 -111.966927 -32.986912 70.026772 95.945450 42.603735 -30.364180 -161.555191 29.403153 4.618934 170.452644 -61.730426 125.701347 135.796621 -13.920668 4.858894 0.000000 0.000000 0.000000 -69.162259 24.250250 14.677697 -169.924373 -19.443671 -46.843180 -176.134150 -43.926022 5.240830 -156.100584 4.415043 -31.437724 0.000000 0.000000 0.000000 +-5788.885097 139.325693 -23401.888230 162.525349 47.526932 22.493552 -102.323681 41.307523 -82.381954 -104.195948 10.492520 10.815872 130.794758 67.757583 98.175216 47.022057 -57.521358 -93.370271 -106.564230 -71.824648 -14.833804 109.689888 -37.217765 60.115219 139.292780 -20.103657 -107.293948 -71.826649 -48.503222 92.789624 92.331214 -18.226950 0.624623 -114.943958 15.680548 -79.317099 91.465738 38.111760 -82.989113 132.175613 -28.151702 -71.864386 -120.508997 -40.729666 64.117334 84.068987 20.514138 -32.732780 -159.791423 40.759121 14.708958 155.793379 -82.581324 142.019016 134.652630 -12.024376 6.890199 0.000000 0.000000 0.000000 -52.389609 26.849048 12.569640 -159.228521 -22.264500 -53.261446 143.085924 67.954634 113.260383 -153.677846 7.736143 -26.002597 0.000000 0.000000 0.000000 +-7750.044761 40.961306 -24552.467239 91.275499 7.600619 141.069654 -90.161290 -66.090504 -75.440025 -105.307200 46.286742 46.776062 109.005959 37.564724 68.058610 -31.404845 0.337985 43.723061 -98.677226 -45.632473 -51.087924 97.232435 -26.797052 86.609006 -161.208819 -38.196370 -87.436794 -60.652014 33.777302 -92.734708 114.557461 34.746346 -48.369040 90.082064 37.942580 -28.445505 86.831135 -48.601112 -77.188500 99.677180 -53.006931 -58.926363 -144.747959 14.642770 62.422337 75.354702 -35.900658 -8.052005 -147.427311 5.645122 35.059036 -178.948596 -71.442740 122.547755 133.672351 -10.210486 8.997440 0.000000 0.000000 0.000000 -62.957538 26.306699 20.885451 163.663343 -16.796399 -17.919200 135.122413 43.766922 71.938723 -159.682039 -7.127899 -23.117909 0.000000 0.000000 0.000000 +-7395.876811 -168.216665 -24048.752631 75.201541 -49.458433 102.477776 91.475208 -28.088187 29.783961 -98.752624 33.920063 35.415038 123.991202 54.612810 88.949313 102.386508 -0.739809 171.635556 -101.719763 -52.723727 -27.594756 101.972261 -34.624476 66.779457 158.662537 -17.907249 -107.196598 9.354795 -24.108141 -5.740855 -96.050005 29.101356 39.554949 74.330444 79.182162 -88.187782 -87.981701 45.471138 96.609692 110.973434 -48.578423 -54.561457 127.194975 38.992051 -15.219985 75.419730 -19.944557 -16.284804 -141.738256 0.820385 53.717793 157.201779 -49.810994 147.682853 133.461503 -10.338465 7.852246 0.000000 0.000000 0.000000 -64.711062 22.921621 15.947851 158.727481 -10.139472 -11.159480 -143.129044 23.278909 121.005711 -167.806722 -14.747731 -16.828398 0.000000 0.000000 0.000000 +-7542.134833 -137.243155 -24284.753206 
114.397264 -8.188572 143.852103 95.350326 -42.286552 51.995027 -102.393287 1.677998 45.710424 -120.025936 56.501448 -133.267051 -92.254812 23.265068 -169.869756 -109.912212 -41.851846 -21.681481 102.681094 -27.147764 71.674374 167.982616 11.863588 -87.346393 -105.292989 15.158606 -178.771120 -94.592553 -12.962684 46.824256 -104.776841 58.317049 89.437036 87.130487 -66.261041 -82.089903 104.016933 -51.586548 -44.306165 124.996090 22.852013 1.606195 78.436629 -22.740767 -7.341423 -142.155668 4.037205 47.310181 -173.340384 -64.005918 136.505829 137.836291 -12.568441 7.780109 0.000000 0.000000 0.000000 -67.753117 22.508930 17.195367 157.066361 -11.596619 -7.339139 156.281256 17.793445 93.950747 -160.073686 -13.191469 -17.065098 0.000000 0.000000 0.000000 +-7679.643945 -438.590826 -24366.123800 64.285350 -70.476983 91.340866 94.693905 -1.452996 27.223979 -95.937319 -2.511159 16.145192 116.411982 -36.771588 66.608219 -153.748584 -3.847852 108.312257 -96.072436 -27.244973 4.722971 140.511767 -64.319141 18.477981 166.815377 -36.159278 -54.541881 -124.924341 24.367874 -111.574121 -90.698629 -3.263682 20.878183 86.083479 76.797242 -81.881171 85.641222 -60.805119 -69.380220 137.529796 -62.404267 -36.732353 117.647933 18.880480 -11.374050 95.096668 -0.692882 -26.518908 -134.815478 -9.011940 77.702368 5.629419 -65.107651 -95.423417 136.712749 -13.550175 5.143372 0.000000 0.000000 0.000000 -71.510710 21.260763 12.471857 157.793402 -9.107038 -6.403201 -163.140286 3.525878 -13.561152 -156.414941 -8.271918 -14.730447 0.000000 0.000000 0.000000 +-7416.188311 -734.944586 -23657.434622 165.941551 -22.129786 -128.027029 90.910022 -21.771092 7.106968 -106.606976 9.401549 34.034994 120.890727 5.106992 88.870600 154.677311 -16.482299 101.915138 -103.174665 10.346599 1.013026 -157.110904 16.218258 86.108084 140.625020 -28.972407 -33.856920 -79.447270 7.882364 -129.338091 -90.868216 26.360736 37.230339 120.152498 81.936706 -50.320496 87.910575 -60.089361 -71.960392 98.803078 -37.376570 -35.375016 109.501778 7.907097 -39.949982 101.968870 9.995297 -30.169123 -125.341370 -0.089823 74.026826 -35.165288 -41.542495 -91.388016 135.374016 -11.875401 4.416346 0.000000 0.000000 0.000000 -80.193662 8.060065 2.956580 157.725654 -16.450833 2.691627 -170.590900 -16.884454 58.449707 -151.235122 -15.727305 -14.551100 0.000000 0.000000 0.000000 +-9085.922283 -1351.667669 -23541.911147 161.116913 -4.433433 137.312268 92.911268 -50.099108 50.993557 -111.430861 -33.320710 107.762698 123.758555 14.137448 88.932855 -69.218859 51.672459 -71.017192 -96.846515 -3.693079 4.470938 -151.986688 -50.232609 83.230816 142.480625 -58.537643 -25.651736 -115.303917 9.076831 -118.124004 -85.909309 21.893671 57.527259 99.956589 55.725567 -63.921223 86.613909 0.866462 -81.236939 100.245450 -19.094149 -39.458413 122.193259 -0.381842 -32.191816 107.998661 11.326447 -45.459226 -127.617109 -18.679997 88.432910 -52.205025 -5.512591 -73.491145 134.016901 -9.731110 0.717261 0.000000 0.000000 0.000000 -85.186111 13.234549 -9.075497 156.454798 -15.601185 5.525830 -143.649629 -31.923020 43.720839 -142.259606 -20.819009 -14.632789 0.000000 0.000000 0.000000 +-9533.013435 -1736.120428 -22577.878058 -108.933548 43.919480 -157.372737 80.301081 -49.657693 63.751345 128.171970 45.272072 50.645893 143.367536 -38.042732 80.414290 163.066425 31.845075 -176.692479 -101.198022 54.550605 4.334298 -132.083798 -33.659180 48.081651 116.658155 -54.585954 -4.601580 -165.781106 -0.017381 -95.178829 82.894030 30.607777 -66.367016 99.664255 5.116867 -62.331992 -81.400278 54.505696 
65.382008 92.568802 -25.330460 -64.637016 115.798810 13.252516 -67.574744 112.847977 6.436225 -41.858176 -128.679782 -12.707424 71.501835 10.127975 -28.335620 154.494573 139.574158 7.672360 -2.147182 0.000000 0.000000 0.000000 -94.998371 12.480614 -33.557739 133.886382 -39.667199 44.941156 -165.525519 -14.926146 54.008571 -135.581141 -61.250049 -53.138563 0.000000 0.000000 0.000000 +-11403.694202 -2152.094671 -24430.078585 162.093554 39.133801 106.895254 89.583364 -23.968703 75.802916 127.110777 71.420124 53.381595 125.565149 22.378955 75.982249 147.819433 28.909043 -173.521155 -96.205588 61.870758 12.514207 -106.113213 -42.720175 -23.844219 113.228020 -52.374181 21.374764 -158.530046 20.190954 -128.682219 89.739200 24.659412 -0.731314 124.975928 -54.201931 -27.327242 -95.141883 51.617120 -55.778698 95.905610 -4.489230 -68.877221 177.392508 26.429752 88.786213 98.488191 -19.202936 -38.418285 -141.780530 -19.222863 40.165922 -15.069749 -31.589310 145.681825 150.414831 45.873788 -14.054586 0.000000 0.000000 0.000000 -89.464561 30.559481 -19.828003 115.940053 -6.822814 43.405732 -170.029192 -21.484991 -26.378149 -104.895059 -23.186255 -101.533941 0.000000 0.000000 0.000000 +-11868.254891 -1762.405462 -23700.184559 110.489453 -23.915536 92.277009 -105.408122 -61.808584 -96.137823 120.212123 -60.356621 -16.198176 124.680091 -27.936672 67.954503 -123.270158 -59.691387 124.346450 -97.744500 37.490099 -15.539356 -96.658460 10.655751 -32.512496 107.014154 -39.074823 45.479678 147.347809 3.867678 151.279442 95.909453 14.972143 -11.022898 103.417456 -24.349342 -33.188442 -78.119494 70.153275 42.662716 89.819813 -13.855133 -66.020667 -170.024928 -14.188601 103.497211 125.949226 -22.712753 -63.245611 -99.840671 -45.191624 -56.333444 -39.263615 0.907063 135.901210 -134.832243 -82.647611 116.338872 0.000000 0.000000 0.000000 -87.768357 18.707643 -39.629136 109.434663 -26.886261 77.122411 -153.211444 -17.751597 -42.072169 -108.052982 -16.228125 -101.295788 0.000000 0.000000 0.000000 +-11351.901460 -1557.284002 -22844.064996 -137.084355 70.535016 -158.185045 -75.495998 49.949124 -72.320444 93.812177 -56.579554 0.814309 -120.924616 -27.027491 -59.353973 -94.206313 43.131446 16.775390 -148.104800 73.117116 -30.039998 -83.898197 6.217196 -54.914122 101.484549 -34.719068 40.238449 -176.467671 23.128577 167.517393 92.324961 11.721145 -16.486453 109.120038 51.007306 -12.407278 -101.549184 54.612802 -52.511140 83.072783 31.639084 -83.053495 -70.162175 -29.068507 -49.405028 73.550727 -10.931350 24.624374 -58.059666 -43.817674 -93.725733 -177.301066 -28.025844 146.676559 -100.155872 -53.003247 34.450193 0.000000 0.000000 0.000000 -87.318553 0.052873 -18.776888 -50.178259 -1.952292 -71.253459 -138.252302 -20.026216 -63.553318 104.852230 36.384400 59.161481 0.000000 0.000000 0.000000 +-11232.432452 -948.942389 -22908.986998 -24.830469 25.505746 172.086794 -59.600042 -6.330746 56.647587 78.420150 -36.461399 29.680165 -107.762780 -6.442750 -61.682987 -42.813161 -34.187597 36.724505 105.120239 -50.797127 -13.908833 -84.225890 5.625548 -64.039874 55.231069 -71.271568 148.788321 -55.829170 5.263180 -68.637998 82.810669 25.407617 -15.383386 137.978596 49.845832 15.554979 -95.524673 16.133800 -49.351091 83.941534 0.835583 -84.695694 -69.367156 -51.833046 47.010941 105.690886 -4.386053 32.947197 -58.721478 43.905236 -53.302197 -163.174326 -24.523550 101.526025 -95.381769 -37.138027 17.448142 0.000000 0.000000 0.000000 -93.744679 -41.889783 8.772963 -72.417620 -13.482018 -44.195219 -167.566971 -6.995348 -80.434070 93.108539 
27.667743 17.529009 0.000000 0.000000 0.000000 +-10936.872038 -537.197394 -23689.550781 -10.601053 -58.496006 88.482870 -74.871526 -21.192908 51.386617 85.878654 -57.929532 -16.536808 -108.959845 3.597181 -58.962229 -61.296526 -4.393600 35.171013 90.847853 -46.834633 15.414517 -80.402976 25.606436 -53.824164 95.166823 -81.476544 110.021778 62.600888 -70.183613 -178.299527 87.975343 7.119758 -3.258425 131.169987 -61.740797 16.608753 -91.937652 -2.890082 -56.823821 -43.815084 -66.504127 27.973103 -57.020174 -40.468352 7.684322 108.116729 1.671315 31.713768 44.698700 22.745015 71.627700 -167.516849 -27.212744 115.737053 -98.348796 -22.810647 7.138909 0.000000 0.000000 0.000000 -100.705692 -17.203317 5.288589 -40.513268 -18.511589 -49.837064 177.668302 -7.075618 -88.996532 95.632357 10.395332 6.433364 0.000000 0.000000 0.000000 +-10083.592379 -522.392340 -23241.044993 -59.370420 26.496476 67.871100 86.929743 46.579965 -87.751037 108.626312 43.688713 -55.953923 -105.275378 -49.405544 -64.883393 -61.908555 -40.177318 39.199209 132.412453 -9.920126 -88.083825 -39.157901 73.136084 79.053132 73.082643 -41.756528 99.282893 17.126697 -28.105110 -13.701424 98.903113 -33.714158 30.592775 97.907505 -57.128113 67.574011 99.809369 -75.552175 78.077724 -74.453316 -31.194448 -31.972462 -70.049343 -38.144346 53.207456 106.488757 19.176560 21.686504 22.802799 19.423933 17.009548 -176.335292 -25.955069 142.299514 -101.830075 -34.520170 24.669209 0.000000 0.000000 0.000000 -96.819727 -25.627217 5.038944 30.327156 -25.042384 -89.228064 169.503581 -5.543859 -84.811411 98.149137 3.399935 -1.793876 0.000000 0.000000 0.000000 +-11152.508125 -275.448340 -25268.862225 -122.936624 54.204319 -56.115304 111.943655 38.937886 -69.437321 98.933031 -2.097372 -47.460872 102.234934 27.964962 86.555316 -9.123497 4.479051 -30.723335 -124.575517 67.988908 -28.611582 100.077304 61.195723 -83.806447 -63.860213 19.575348 -87.935589 -11.491180 4.300658 9.735392 105.459719 59.067656 -45.471298 94.741492 -18.541435 32.956072 93.912419 -60.725361 76.077708 -57.546806 -28.254008 -63.145854 -64.135709 -18.566715 39.230970 101.476148 17.271485 18.543795 34.108864 1.822164 6.003643 -172.113002 -25.180388 129.734918 -103.548833 -32.006750 24.031724 0.000000 0.000000 0.000000 -87.567660 -36.179481 39.398632 19.189601 -63.014913 -46.942745 -169.569314 16.585111 97.592257 118.821695 -24.207445 -3.138522 0.000000 0.000000 0.000000 +-14667.033179 -603.582037 -23311.617055 72.413022 9.714078 -151.679628 88.298573 -43.435912 -66.576334 -103.205311 47.289675 102.024760 108.057096 41.217031 85.237827 -5.214809 21.158477 -16.524987 -78.099565 78.871105 12.492127 -141.687742 48.997018 -37.092215 57.652014 45.552138 -22.881058 10.936743 15.210407 17.546138 88.695377 29.353904 -104.121650 85.335688 -1.663633 32.698186 82.312330 -27.538324 12.743262 -25.229959 -45.222123 -45.392695 -58.066376 -11.225702 46.136243 113.506966 16.228204 20.805903 50.208701 -14.976705 -15.213098 -155.291008 -14.159208 75.275587 141.370878 51.351564 -35.746466 0.000000 0.000000 0.000000 71.618529 14.307731 -92.856135 -66.552758 -51.494116 29.587209 -175.115360 34.900352 99.648633 -121.978840 -0.285989 -17.321837 0.000000 0.000000 0.000000 +-16884.617316 -583.817755 -28501.209419 -152.404211 -14.407182 66.245557 59.035092 -75.758315 -25.420464 102.314213 -19.181809 -69.048292 101.654775 42.425084 68.815344 -11.718708 7.998180 -17.557373 -6.625492 78.461533 89.419470 84.774231 58.181676 138.496996 72.480046 -9.760044 -87.480079 -3.592693 22.426432 -0.576358 84.475076 40.759778 
-96.500639 81.742851 1.922911 5.944871 82.200238 6.837501 -69.052827 15.307530 36.893868 -46.652023 -21.188192 -18.376929 -17.854118 106.255280 -17.222608 -15.549050 58.561554 5.345274 11.429622 -153.256825 -43.513702 73.496647 117.860444 16.857228 -2.304129 0.000000 0.000000 0.000000 71.399383 31.783999 -91.598997 -158.208066 74.484596 56.752237 -152.666866 59.399142 153.595620 -140.459290 -3.806654 -14.587320 0.000000 0.000000 0.000000 +-22966.582813 -866.267584 -25112.796040 111.553242 64.074702 -129.730418 75.030453 -38.554373 70.089811 -88.694421 16.422975 100.724931 104.685084 46.749743 61.355019 -4.658201 4.665930 -23.807480 -11.971158 79.478196 82.386243 93.267809 34.215060 124.406581 51.204627 -58.277870 -104.382693 12.224240 59.801319 -6.886805 80.388229 21.716416 -93.034653 79.276952 -40.975724 27.511620 50.416409 54.205957 -73.550723 2.358117 -56.686595 71.596311 -41.561334 -38.043072 -29.731301 71.130575 -87.247528 36.843847 -129.070275 -19.627178 -17.926042 -126.632806 -2.083285 46.773339 139.279454 54.105972 5.456988 0.000000 0.000000 0.000000 -51.920321 38.516025 103.660721 -174.551455 72.990645 46.500801 13.682613 73.526015 -82.973236 178.658696 -37.005711 36.066713 0.000000 0.000000 0.000000 +-26110.605116 -1531.552191 -30634.548836 17.601284 28.924732 -68.816585 78.233262 -36.040636 71.586338 -98.752134 30.960147 80.898921 86.886228 62.907793 36.916807 -19.726294 44.537648 -36.001918 32.598928 73.925337 96.947382 -102.914895 55.976082 -52.990368 -115.681000 -77.022759 9.782083 4.345889 5.040691 10.018848 68.161544 -62.999848 -89.398215 95.914280 -60.407654 44.001377 80.995907 29.343746 -3.432723 20.947978 41.324625 77.806505 -78.952001 -52.539608 49.752171 69.641048 -70.135690 -17.775369 -148.146140 -19.373729 39.256846 -119.286198 -35.840323 64.363695 142.524323 67.624046 -29.251430 0.000000 0.000000 0.000000 -12.211068 -8.287818 81.926287 -164.212802 48.160189 71.478456 162.360612 75.272975 -134.721950 -158.455816 -40.381094 14.524081 0.000000 0.000000 0.000000 +-23247.311354 -2146.267523 -33681.546843 -119.445899 -39.214265 49.581279 93.641166 -65.995054 43.602448 -101.838261 61.220465 70.890304 93.385417 70.716798 13.150662 -124.866265 1.698770 1.042316 -71.935908 77.255316 12.493891 143.214679 73.077940 -166.389772 99.789059 42.530905 88.200142 -80.301939 -25.706575 -176.161001 -70.188854 -54.705760 31.240379 90.971423 -58.991559 66.242275 86.762284 12.520842 -11.960376 -84.790489 33.301546 -23.951137 -99.510811 -34.109638 62.097864 50.128379 -65.000204 -2.050709 -151.178271 -19.382700 40.334504 -22.380731 -35.460093 -64.124164 -92.398139 -47.837416 62.039773 0.000000 0.000000 0.000000 -3.672004 15.771488 84.983125 -128.980556 27.542001 73.612578 -125.491179 53.668998 -55.287938 -78.823374 -32.099071 -63.120185 0.000000 0.000000 0.000000 +-20619.506514 -3339.216742 -29773.770973 -155.474269 60.535924 62.951654 89.786039 -55.127619 59.049911 -117.055451 68.076602 44.506573 103.245461 80.620504 17.378415 -9.312609 -23.955248 -6.881585 -91.230646 38.556206 -14.707428 -112.366595 51.217274 -50.211689 98.408261 -18.789782 47.403298 -78.688165 37.063788 -142.342791 35.216129 -59.369573 -42.070992 91.613021 -53.429089 69.006566 86.341934 -24.794873 17.500492 -80.060589 19.978788 -2.911807 -98.190154 -17.963512 57.001495 54.630074 -64.926262 -9.474950 -150.042735 -21.040785 39.119361 -20.426525 -23.962991 -86.309897 -100.460997 -43.911032 71.778612 0.000000 0.000000 0.000000 51.481386 12.128440 64.987217 -106.560025 -57.134658 -21.270991 126.037603 66.134647 -162.629035 
-99.385427 -10.773055 -27.506113 0.000000 0.000000 0.000000 +-22756.487115 -3083.143499 -28497.840976 -133.875218 -33.884460 1.780292 102.508072 -65.479457 57.249399 -132.197995 63.158492 21.446282 114.873853 60.893430 43.837676 -3.872130 -13.742856 -13.109381 -91.930841 -7.489667 -15.273642 -112.797774 47.441244 -55.951777 107.437038 -8.199541 36.134189 47.426220 35.761541 -17.580022 101.504728 52.657481 -24.636689 81.684125 -36.168232 61.150759 84.940657 -33.198942 -50.192062 -79.202102 48.923119 72.584294 -104.192975 5.088274 16.961086 71.747660 -84.816200 -12.556530 -153.562464 -14.854217 41.164005 -40.811358 -3.875607 42.591301 134.308308 7.092683 -11.566342 0.000000 0.000000 0.000000 -14.220144 5.027142 59.544029 159.518237 22.784142 90.952095 -23.637934 -56.180193 -64.891979 -158.453079 1.110744 -32.010833 0.000000 0.000000 0.000000 +-20874.788350 -3227.938910 -28314.055459 -37.888579 -9.013914 -145.891351 105.736068 -55.465026 41.801882 -94.638909 81.948484 51.094612 110.347391 50.029055 53.891774 -17.427900 -13.088888 -1.360462 -93.454373 -14.734164 -12.039206 -157.706574 54.026136 -102.324195 108.254914 -33.400062 21.934347 55.045688 21.947274 -13.690618 112.190404 54.084366 -23.304552 82.674797 -25.060573 57.937058 84.762079 -47.278931 -68.493803 -64.247034 -31.318313 101.303317 -111.638398 27.130494 -17.115103 12.784657 -65.745898 84.377218 -155.768210 40.132317 55.714693 -41.105345 -32.304605 0.085249 128.598991 -4.723411 -5.814304 0.000000 0.000000 0.000000 -35.706510 32.521134 72.137167 -82.401243 -67.350533 10.730307 28.108356 -60.113018 -119.341167 -148.650952 0.877958 -23.081130 0.000000 0.000000 0.000000 +-21849.027397 -3561.169134 -29327.326998 -118.719791 -32.020384 92.649799 91.666487 -64.280114 58.776071 -161.578439 69.224696 23.850196 111.478199 40.359045 64.443873 9.088746 -11.052296 -7.301742 -95.147710 1.667947 -10.627573 99.629653 1.938474 122.937475 113.831874 -27.028646 12.102018 52.628843 0.454646 6.946211 -114.350636 28.887301 83.355623 80.615407 -45.107643 74.790912 -90.264120 0.570570 86.096898 90.158188 -59.329579 -44.429379 -101.839410 -20.781871 -85.843362 5.909851 -62.716158 82.865209 116.743134 73.253005 -41.064558 -51.956978 7.058643 3.400943 128.823883 -9.502651 -3.675867 0.000000 0.000000 0.000000 -50.113967 56.373331 60.589997 -96.824045 -47.547391 16.605890 80.736846 -4.580198 -121.716060 -158.320703 -7.563435 -24.593917 0.000000 0.000000 0.000000 +-22000.984427 -3808.682562 -28324.749117 153.021997 -8.353234 43.098729 88.075695 -64.777976 66.216037 -110.984472 10.689692 79.145780 116.304733 35.941589 79.142692 16.286326 6.893299 -0.820169 -96.921544 -2.459299 -12.118679 -174.381854 76.217961 -127.530551 142.650467 -42.582183 -35.831206 38.805491 -7.516707 10.936679 -107.021951 -42.259883 72.462160 75.842289 -59.995189 51.340981 -92.201828 7.267267 84.575070 91.928809 -58.956867 -40.872708 119.748985 -46.164121 60.205980 21.500387 -61.373791 69.170442 122.302016 67.969220 -31.817779 -53.739955 2.432667 -16.240170 126.087062 -7.670787 -0.112684 0.000000 0.000000 0.000000 -43.708802 50.805188 58.368967 -93.115094 -68.977876 -24.678900 80.723319 31.175179 -110.859149 -159.280388 -8.846622 -19.274628 0.000000 0.000000 0.000000 +-22286.202580 -3667.573187 -28435.356925 61.343116 65.367224 0.649274 93.029970 -64.938087 60.578041 -111.648278 24.899418 83.327367 106.920536 -20.523573 79.181755 9.301447 -6.322301 -4.451625 -95.979522 0.916387 -11.794950 -176.090029 -72.201411 44.826717 141.629251 -23.661668 -56.772158 26.492595 30.271829 -19.644564 
-128.763722 -50.157389 58.882449 75.858619 -53.182359 64.576257 -91.482227 -25.007481 83.072633 95.741457 -35.819140 -43.299294 130.519889 -29.794554 47.453033 13.781022 -60.384740 67.971733 99.133364 33.745904 -63.656973 -52.670004 -13.893745 -17.902478 127.393505 -12.335383 1.260302 0.000000 0.000000 0.000000 8.415435 83.214424 70.860479 -84.476511 -56.413099 -45.794872 74.088182 33.179027 -107.197219 -162.510394 -4.467156 -19.911189 0.000000 0.000000 0.000000 +-22269.699564 -3039.583441 -29266.604185 -21.846159 -3.297312 -87.156079 98.764238 -64.106671 57.289731 -102.325262 -4.197661 90.377700 108.171583 -34.713623 75.623703 9.297201 -15.491312 -2.952437 -95.032031 0.109276 -8.755025 178.644550 -1.220047 -92.774062 124.839017 -13.501736 -65.527370 18.093102 18.319025 -12.423370 -123.450016 -29.505247 66.653820 73.106435 -63.221593 26.401038 -91.864413 -12.567769 83.288931 95.135187 -13.955877 -50.212586 148.158830 -14.664061 9.679788 4.654237 -45.802436 67.719422 99.956356 52.375466 -69.450595 -53.668396 -22.733581 -23.629035 126.712745 -15.811447 1.259979 0.000000 0.000000 0.000000 26.462367 66.213979 79.629080 -107.567528 -55.498319 -42.866386 56.808607 26.119020 -106.201738 -160.371477 -0.666375 -20.996771 0.000000 0.000000 0.000000 +-22197.241340 -2177.229925 -29441.793087 96.805584 -51.024585 108.025343 105.923635 -59.794416 32.984693 -96.907219 -7.249402 104.035648 113.068982 -13.446770 78.032037 14.889971 -3.500086 -1.749846 -97.672811 -15.628157 -8.869224 131.281231 -31.579397 20.484917 135.637222 -15.352311 -105.537906 12.295510 17.684709 -16.328178 -115.762121 26.851281 101.078675 95.628568 -50.177639 -4.123433 -90.973989 6.793492 69.838883 97.812018 9.558593 -49.941175 160.436427 12.303676 -10.071236 11.434242 -41.366937 88.353967 98.417167 25.814047 -75.350757 -41.455754 -41.835624 -40.967709 128.165254 -15.840573 0.170851 0.000000 0.000000 0.000000 -15.277586 67.529548 48.003881 -99.846561 -46.318674 -55.010435 45.032744 25.746635 -103.295526 -162.414954 2.101359 -18.434499 0.000000 0.000000 0.000000 +-21786.675289 -1776.394232 -29893.333405 30.530964 -37.260005 -99.014855 96.960615 -51.922093 36.727887 -95.912618 -25.364163 76.571395 114.797817 27.640729 85.144396 16.745417 -9.186749 2.893237 -96.793673 -11.819607 -1.713166 118.430372 19.653507 94.038333 -175.940480 -38.892647 154.462768 17.419571 12.016842 -14.966256 -108.181560 13.791956 93.836097 111.075579 26.094921 34.801936 -94.579238 28.322440 28.234049 102.449098 6.519244 -51.528013 169.822952 15.735748 3.626957 56.034098 -2.367001 36.072086 91.865173 -14.410124 -76.382673 -44.224803 -44.561012 -36.837753 128.431340 -14.844585 -0.986236 0.000000 0.000000 0.000000 -14.588902 58.384558 61.219904 -96.415224 -41.965984 -56.802105 27.967867 31.284464 -101.644379 -162.105082 6.211686 -16.849730 0.000000 0.000000 0.000000 +-21154.226856 -855.321234 -30136.751902 137.679122 -53.907337 112.376666 96.047569 -64.057769 46.867782 -88.561538 -4.790147 94.504008 110.427193 10.174693 87.222065 25.525975 2.961616 4.227573 -103.823041 -19.783675 3.105292 127.030526 42.538275 123.210716 160.526399 -41.990566 -179.883686 -8.252989 13.824088 -30.671061 103.338969 45.209572 -35.295779 -162.034243 51.123258 85.001166 -92.983200 50.693737 36.397758 95.767476 3.404950 -66.198178 -176.159441 26.671884 3.020234 57.770028 14.571300 30.181364 82.223656 -7.090133 -73.041818 -48.438430 -40.614813 -47.919819 129.181367 -11.541369 -1.634535 0.000000 0.000000 0.000000 -85.989021 -0.915014 -34.112415 -80.850363 -12.252503 -64.879726 26.830046 
41.337042 -94.617732 -161.176165 6.543422 -5.384111 0.000000 0.000000 0.000000 +-21480.645773 -442.712896 -29448.366508 30.709286 -7.379820 123.215565 89.467615 -54.329661 49.651688 -81.427020 -46.543712 84.161721 137.570050 66.166723 137.760131 20.252037 -2.001742 -1.870840 -108.247008 -31.526099 16.901260 84.040850 60.280829 87.175362 115.498901 -0.327839 -88.986115 -8.011690 13.840381 -18.760735 94.377971 4.391714 -74.378617 -146.671312 -6.291236 81.257357 -92.168713 53.816558 22.969984 91.958620 12.174326 -74.875388 -159.166684 24.963555 60.072939 84.375534 3.128778 17.391508 79.086578 -12.364651 -69.685616 -32.495081 -51.481897 -53.237091 131.217699 -13.017855 -1.035331 0.000000 0.000000 0.000000 -82.824572 -3.038004 -49.878918 -67.491040 9.718922 -62.876910 21.763628 38.282009 -92.335605 -159.233634 9.479667 -3.736498 0.000000 0.000000 0.000000 +-22170.339960 -0.316556 -30764.625059 60.856820 -77.500022 106.318673 -51.428197 -77.822084 -164.339268 115.259850 71.665807 101.418921 -106.163990 13.206979 -83.991141 10.722826 -6.526524 8.228290 -128.292969 -64.595977 12.483527 87.686485 -0.985511 94.789281 100.658735 39.497689 -14.324656 -11.891894 5.164626 -18.438176 82.641363 -24.221883 -84.061657 -155.907023 -59.692770 44.452693 -91.347717 58.619222 26.718344 88.484984 -0.570579 -66.415811 -168.306699 51.816367 49.822448 86.150375 -5.503452 0.532832 82.497853 6.825664 -77.771156 -24.940635 -57.062320 -82.601631 129.512995 -11.692244 -1.206027 0.000000 0.000000 0.000000 -125.838796 -26.868050 -5.933783 -86.073372 -12.037414 -64.101147 23.155105 39.813115 -94.405032 -156.119106 10.997599 10.457155 0.000000 0.000000 0.000000 +-20751.762897 126.152834 -31430.218977 -143.563626 19.456580 -89.533372 -41.643339 -77.322978 177.474305 82.143363 65.078825 59.633746 -104.750509 19.779427 -69.592282 33.328064 -3.367286 16.727350 -156.716884 -80.437078 44.140070 -89.025721 72.335720 -75.118695 80.812697 20.199285 -25.907115 -21.751956 6.275994 -12.499927 79.434769 -47.439846 -90.483977 -104.507068 -37.428485 81.162823 -88.095420 42.667400 52.026032 88.017346 -32.794057 -50.177221 -144.094811 44.073506 76.203758 90.622082 -3.364708 -28.253407 96.809735 -3.631962 -87.428323 -50.147175 -25.585876 -83.345452 126.231156 -9.163867 -4.223063 0.000000 0.000000 0.000000 -122.467294 -11.045753 -7.661010 138.884925 -24.137625 55.521590 26.853317 38.773089 -96.293392 -149.040035 6.619051 21.977150 0.000000 0.000000 0.000000 +-21086.179880 84.288790 -30848.972206 -112.715299 -42.510044 -1.249932 98.016364 30.911886 43.494020 84.247896 61.875561 42.753144 -114.037503 35.472900 -75.285644 57.249954 15.381487 -8.574373 -41.001322 -88.137239 -47.958836 -80.487859 58.345617 -64.664091 24.789875 21.693239 -57.526935 -33.985962 12.713629 -7.819663 84.583420 -9.300813 -101.056968 170.514670 79.307958 16.232762 -88.289591 20.976378 79.352565 86.958364 -22.413895 -41.537584 101.026136 8.219956 -63.883615 91.511040 -6.397974 -46.614409 92.993798 -18.401652 -87.496770 -47.763281 -15.029180 -76.929952 128.484397 2.911201 -10.802828 0.000000 0.000000 0.000000 -118.605469 -37.811903 -12.666245 130.400766 55.917186 96.049064 17.834396 36.044946 -96.774733 -144.375848 0.823924 40.039337 0.000000 0.000000 0.000000 +-21751.871258 211.243601 -29101.585185 177.970802 -14.938682 148.507999 110.312481 66.848804 45.745107 94.919740 59.362548 58.490553 -114.209763 36.718901 -105.023047 29.523354 8.816945 -5.587607 45.025481 -72.334982 -165.209491 -59.347438 72.732191 -42.672052 19.274703 -4.592502 37.702085 -17.877915 24.221178 
-12.038961 -63.999769 77.203590 82.208461 -144.566994 69.598413 62.218275 -86.697459 35.554650 61.155173 84.316406 8.243435 -17.460625 89.885143 74.536016 27.072052 81.044465 -6.788408 -37.916406 76.037664 -24.169471 -84.016125 -33.755395 -39.730281 -58.410398 -111.081671 -8.083277 76.120098 0.000000 0.000000 0.000000 -85.451012 -23.635614 17.531230 132.918558 11.853500 48.641820 -106.675389 -71.371942 -56.175612 174.517396 4.143040 47.798173 0.000000 0.000000 0.000000 +-23905.917942 -36.115477 -27569.912541 -80.800027 -37.464395 -140.862628 104.594310 59.173202 64.566359 -121.606828 77.594772 -115.973548 106.485271 -31.820822 32.438719 -159.520675 -5.010044 32.381390 -32.379895 -66.117289 -112.799201 80.953444 -29.472071 108.424672 52.309358 -8.454827 73.644814 -40.614102 10.616688 -16.929494 -72.692436 68.092353 79.627685 93.973307 36.141288 -47.869034 89.493805 -53.570031 -64.720426 64.730717 -63.637888 65.087492 -32.013015 58.433510 -61.377234 78.167043 -12.148775 -46.819993 110.238560 -32.277634 -105.188313 -114.158000 -68.878342 60.200371 -84.774865 -59.899906 15.765684 0.000000 0.000000 0.000000 -82.542288 -33.227111 75.619792 96.900387 -18.945156 -60.154643 -148.622821 45.978557 -108.594411 113.028679 1.891309 23.707410 0.000000 0.000000 0.000000 +-26526.648717 -122.639280 -29091.301879 121.299184 -31.194970 75.861739 -91.624880 17.361030 -111.114729 -119.217821 65.414859 -60.271762 113.744042 -35.423430 26.751965 -25.419628 0.273230 -32.767398 -53.106618 -68.040837 -86.102856 84.159116 0.396221 94.967011 53.429184 -14.969627 69.492732 -24.460415 18.308014 -21.899172 -59.957614 84.136428 106.207591 92.414276 20.925189 -55.509076 90.674975 -18.843950 -75.039007 23.608466 -49.478765 70.683354 1.446885 69.482553 -31.326420 75.174924 8.256043 75.386500 50.347110 48.572981 36.792504 -147.569161 -49.643968 105.420936 -89.735096 -39.817380 -7.793187 0.000000 0.000000 0.000000 84.525473 51.632051 -97.142488 94.526753 -53.396372 -69.542828 75.968722 18.346853 105.014981 111.941950 -7.003809 18.203112 0.000000 0.000000 0.000000 +-26715.630304 -153.625845 -27815.998927 -133.786303 -72.707871 -176.373267 -88.065008 11.488193 -99.740539 -93.797076 54.516885 -20.871971 101.953188 18.655023 54.797037 -69.227901 -64.617466 -148.674476 -66.815506 -67.043446 -110.100864 77.004362 -10.789696 114.110878 -106.536066 22.520986 -78.170799 119.410961 6.631679 129.940486 -87.100377 72.953471 67.033054 88.905994 7.730301 -50.780435 91.694520 -34.655818 -64.820879 -86.246825 23.041357 -37.771987 -35.043574 -3.299256 -74.872283 80.214729 -15.664594 60.003425 -144.446830 13.293614 74.646889 -169.347176 -59.866683 139.222204 -71.817067 -58.398118 -6.175226 0.000000 0.000000 0.000000 90.584260 58.533836 -90.872750 -88.783193 8.410582 108.973860 90.101134 -17.330211 70.045144 114.581198 3.237392 21.769065 0.000000 0.000000 0.000000 +-27051.546964 229.930551 -27758.999029 70.756829 14.050569 107.752304 168.636489 71.684420 83.876402 -65.801557 80.512094 62.565261 -105.023092 -34.310197 -109.691639 -47.491030 -10.200763 177.962490 71.374284 55.751955 36.192532 79.421727 -45.546891 46.746813 -90.452462 -9.883326 -41.875067 149.448026 33.576503 -9.659536 -74.370407 71.945608 96.721533 93.181484 13.544804 -42.702162 92.741337 -37.198957 -62.485780 -88.034561 -2.360151 -64.762948 -47.078949 20.705258 -81.120587 70.419505 -15.646244 70.413883 124.769086 -30.172245 76.695969 143.935998 -51.252844 -153.920252 -54.911766 -51.175068 -26.094531 0.000000 0.000000 0.000000 91.738505 13.156181 -86.540628 95.924652 -1.170258 
-67.883350 111.735619 75.105209 55.532040 114.155185 15.030881 32.918362 0.000000 0.000000 0.000000 +-27188.278545 135.322519 -27884.471920 29.672297 -59.371913 -32.995702 -86.568296 66.120018 -110.312233 -84.626561 82.499735 65.044952 -70.681811 30.007293 42.900920 -170.450730 -60.431812 -53.024050 80.952076 20.248133 27.379619 99.508901 -29.863028 7.199440 -92.396871 0.450958 -37.413828 -131.123345 -15.452926 2.701979 86.545458 52.996357 -76.738717 94.029849 20.534858 -28.548611 92.878147 -35.180241 -53.506237 -87.351857 35.018540 -45.722781 30.059978 -68.624372 -50.136466 78.117622 -29.448754 44.512629 167.010796 -29.510818 52.614894 -154.175356 -41.981739 114.248737 -72.862400 -56.838310 -8.519533 0.000000 0.000000 0.000000 -96.484191 63.594676 78.595212 108.757452 24.246607 -60.546079 79.965227 4.558494 92.998790 119.015155 27.570042 31.727554 0.000000 0.000000 0.000000 +-28355.313247 238.325327 -28652.031600 -172.423612 48.409477 -177.659712 141.597314 81.064127 93.025894 164.146471 77.943572 -52.332662 -71.532353 30.274032 33.685205 -175.178023 -18.730293 -24.497392 83.294364 7.099596 16.500423 93.596902 -21.265442 42.853007 -91.114795 -32.389814 -18.465703 -142.171081 -49.021318 38.729746 22.150919 59.532466 -32.146261 139.060040 72.897048 2.427245 97.824802 -42.944443 -61.580101 -86.907492 46.106921 -58.975987 -68.929146 -52.396343 54.446061 79.947115 -21.965707 26.238896 -168.254195 -32.524835 32.108347 -143.006511 -10.923502 87.321928 -69.253528 -66.827458 -17.290714 0.000000 0.000000 0.000000 -86.315222 27.643513 73.055717 123.178810 15.302446 -47.561503 -109.307408 32.706479 -101.877317 158.957918 38.498631 41.465317 0.000000 0.000000 0.000000 +-29240.431456 308.941767 -29787.710934 78.398361 -37.421628 -93.615263 126.316624 54.536467 75.466785 -66.677395 37.551818 137.907650 -73.502264 33.991428 79.217179 -160.120003 -5.469411 -15.590771 86.179435 20.720356 14.584862 99.680132 -8.253878 43.327073 -88.567183 -3.890312 -27.287483 -152.190491 13.674832 23.838601 84.792860 63.231489 15.436581 -102.717089 3.080463 87.008663 94.203660 -29.804825 -59.152278 -80.708807 -78.438308 -98.352022 -99.891564 -42.408947 64.398190 82.522682 -20.620837 -1.574196 -169.749250 -43.574274 33.407706 117.673593 43.241591 78.004806 -170.067974 29.888496 57.224149 0.000000 0.000000 0.000000 -85.589982 33.459199 49.773525 136.548115 8.917433 -34.994736 -128.559100 75.552777 -49.182922 126.264452 7.510500 68.125315 0.000000 0.000000 0.000000 +-28864.656221 198.191416 -28948.513042 -130.090081 -30.164952 85.998906 -92.181727 21.288318 -104.075589 -125.072427 -84.319621 -43.038532 -77.587553 30.943230 64.593299 -160.140556 -5.553237 1.649223 88.792660 34.625723 42.680271 100.969086 -25.276338 47.053174 -143.525461 -78.636016 -72.215801 110.557885 37.487939 -105.055078 47.896026 78.742759 -47.249866 -100.924788 -25.926010 83.883706 92.862179 -18.257173 -65.775574 37.411572 -88.932985 129.699427 -105.358745 -9.477737 68.493549 83.490734 -17.509350 -17.469525 -173.259388 -37.695607 54.333921 84.681548 79.999395 64.015442 135.492759 42.536534 31.049269 0.000000 0.000000 0.000000 -78.113637 27.521400 34.749798 147.509123 -0.575936 -18.807541 -93.598997 14.206076 26.254546 174.220636 -72.089138 57.253887 0.000000 0.000000 0.000000 +-28859.982760 214.159922 -27194.596339 170.057000 41.093312 121.501548 123.246391 -40.870190 22.380764 -95.719455 11.821839 -179.840430 -134.457475 57.316184 34.475789 -162.371126 8.130015 11.552976 -81.050781 -72.982852 -112.705581 108.600941 8.979218 71.412067 90.087380 -40.189550 
57.292057 -125.085718 23.361111 42.011738 -102.821091 46.142349 114.804334 -95.731218 -36.399351 78.900013 93.016816 -17.306834 -74.114749 68.845552 -82.987352 68.701949 -136.469695 52.073677 48.899358 86.950242 -17.021720 -27.283526 -174.868942 -25.851103 69.723888 92.817880 63.574666 73.776657 130.463458 20.626205 18.180927 0.000000 0.000000 0.000000 -81.839269 20.765773 5.800700 156.068208 -3.923744 -1.510903 -107.014554 4.452046 17.093184 -78.559243 -74.556856 -87.911220 0.000000 0.000000 0.000000 +-31394.454566 -126.878599 -29067.766251 59.256958 -9.206187 78.006535 96.287355 -39.818060 46.243218 -98.714262 54.133824 -120.372280 -125.647197 -50.988503 25.747058 179.816863 -3.036181 -8.113006 -85.387825 -35.563803 -103.698986 111.191747 27.843601 94.488675 85.313022 -18.370734 48.938010 120.361654 -68.874426 -106.717511 -108.068159 -20.599749 54.946965 -123.817595 82.197156 66.096645 94.599333 55.348318 -86.399890 64.146563 -81.966205 66.353911 109.800203 -8.818160 -29.683523 91.419334 -16.135644 -29.334888 -160.637410 -17.188836 83.045533 -70.190184 -81.464720 -123.128776 128.098910 20.214216 14.875797 0.000000 0.000000 0.000000 -76.216619 21.618900 11.741907 153.568150 -1.613057 -0.702383 -100.359408 32.191860 24.943652 -88.713823 -77.669501 -84.680938 0.000000 0.000000 0.000000 +-30903.449557 -446.078283 -29454.405300 175.701673 -5.971439 -128.837930 103.876498 -27.548105 28.990899 -165.882680 78.123497 111.767130 -134.321082 46.383571 48.394504 169.775387 -17.767506 -65.878259 -88.931021 -52.120277 -83.846776 125.440785 44.567109 129.274998 85.276249 -13.757050 61.345089 46.722428 81.861742 36.719780 -93.114311 2.483616 79.656868 89.362302 7.194169 -90.227889 94.055197 14.538293 -83.406502 -16.304738 -81.444168 88.252739 124.414375 -27.195227 -38.495233 77.713646 -29.415502 -18.347627 -162.050021 -20.696418 77.754949 105.876200 -31.639723 70.087210 130.366022 29.250797 18.405221 0.000000 0.000000 0.000000 -68.442914 29.416846 5.031745 152.845673 -1.944159 18.745180 -92.970418 13.802572 28.606504 -62.773369 -76.319895 -116.603954 0.000000 0.000000 0.000000 +-30462.015960 -791.248175 -28810.904430 -138.088273 -53.683319 -84.461695 99.216383 -30.454039 20.435567 -152.432951 35.049580 81.388440 175.354254 78.941700 17.128384 179.053661 -34.649214 -85.798649 -78.745892 -36.624700 -86.464511 97.911872 -21.072004 99.755153 75.494082 16.929556 77.696720 -176.340371 36.855275 140.649262 -108.562505 -24.714660 50.993207 130.570759 -76.635515 85.448065 -103.205948 -18.540551 83.121238 14.804171 -74.611084 80.418999 104.304183 11.092197 -45.702073 85.530927 -34.346094 -18.694384 -165.129475 -3.655776 71.241251 120.221174 -55.307823 55.243246 -179.080298 50.504101 47.974466 0.000000 0.000000 0.000000 -77.939448 24.728932 -0.577187 153.553399 3.123832 29.376246 -109.785548 -20.864680 17.521767 -89.362689 -73.899440 -95.641043 0.000000 0.000000 0.000000 +-30428.372304 -738.574225 -28610.990363 130.388222 33.580299 -79.828212 102.810715 -7.447369 32.951232 -152.317087 -74.148793 -8.696539 151.328485 39.534204 73.608198 163.230357 -5.303695 -37.584922 -83.934362 -51.090973 -60.168180 99.393126 -40.817200 19.120655 98.324709 32.480234 70.698872 -112.937214 -9.598877 178.208777 -96.164753 -8.070867 81.859545 96.724038 46.068689 56.827698 -110.514173 -69.135513 124.552779 -16.754498 -64.757320 95.742000 -148.380390 13.550656 55.016030 45.007562 -60.819436 28.397617 -143.552007 11.896738 51.004751 -74.635545 -64.948564 -97.843688 102.433152 65.078894 -68.989092 0.000000 0.000000 0.000000 -60.533960 
51.004452 27.028005 -149.592824 43.951981 75.870327 -112.960842 60.743647 -145.109594 -87.712975 50.105823 -100.035740 0.000000 0.000000 0.000000 +-30401.771343 -875.095343 -28897.866437 122.940243 -12.399937 -155.331106 110.471440 -35.066182 28.020780 -88.477089 27.570644 118.894059 125.740392 42.422779 38.864268 -123.828819 -17.661364 -177.016870 -98.168705 -56.163492 -33.503522 145.298208 -41.909767 -23.015122 108.312052 22.111114 88.218166 -20.125700 -67.162150 80.089722 175.917086 67.260992 40.185702 96.705759 22.223760 64.551865 137.300550 -69.315859 -98.333472 -50.849485 -64.489341 93.573752 -130.832156 -23.166903 67.578785 57.373480 -62.629223 22.303148 -166.376404 24.257326 59.999044 -167.857979 -57.775600 40.566945 92.116071 82.863732 -91.935466 0.000000 0.000000 0.000000 -71.617069 54.432384 15.196562 -177.177735 22.225335 81.496937 108.287670 63.980806 52.270041 -156.666183 88.056841 -170.902228 0.000000 0.000000 0.000000 +-30095.946163 -338.412665 -29162.134090 -112.531568 -15.944528 -71.716254 78.166097 72.424161 49.423027 42.684126 54.088965 103.915654 121.183307 46.908500 53.400625 23.117918 -73.431729 48.628589 -106.075775 -69.577572 -30.198364 65.830625 20.595368 94.141594 88.627120 -2.780421 62.852000 -14.394623 40.280648 4.433186 93.187992 -55.667334 -133.005824 91.575408 66.346568 54.159550 90.734360 15.226214 -91.845610 -47.117992 -51.692988 97.839405 -140.239243 -57.266305 54.819505 84.723279 -56.961093 -15.135201 -170.071846 -17.163665 70.950246 21.294265 -56.840863 -107.769306 79.269934 -9.839062 -99.114296 0.000000 0.000000 0.000000 -61.186231 50.537226 7.915795 174.552718 67.287710 110.977786 173.148543 50.913804 145.186626 -76.916712 83.079497 -93.295394 0.000000 0.000000 0.000000 +-29240.459022 232.443537 -28119.175912 -26.560830 -87.329743 -47.556044 96.930019 76.298508 48.247010 69.218082 -60.053053 123.761510 109.912981 19.293949 63.068354 48.836485 -24.463323 -9.653702 -147.608425 -86.242180 -4.199808 73.838248 -2.311631 93.445972 64.715762 -23.591303 88.251859 -58.728295 -4.902708 -21.943990 -72.370371 73.610199 54.233056 75.309375 73.191130 -9.667340 96.196693 -65.904408 -78.248248 1.926516 -64.937761 87.582619 30.398126 -60.213286 -82.247800 74.744650 -29.027000 -22.708614 160.810858 -59.199375 -152.287906 -143.393505 27.833952 77.748847 76.091498 53.538368 -60.726642 0.000000 0.000000 0.000000 -68.232760 20.332548 -1.226131 158.036657 23.977883 63.838200 -135.299253 7.921661 -91.803524 -84.631800 -10.516461 -92.405680 0.000000 0.000000 0.000000 +-30506.840063 782.688349 -27592.495011 -104.382888 -31.592880 149.165404 96.272881 80.488882 31.728726 96.092774 -71.738162 121.402182 -74.973238 -43.086466 85.783676 147.253241 8.350756 31.248037 56.607107 63.313045 -24.526446 88.685562 -31.674236 30.160018 23.212988 48.022735 43.455419 113.996276 -32.266376 -174.445729 -76.248849 54.714647 47.747209 92.750220 74.946618 -28.843727 -91.129928 -30.954900 88.854241 23.294013 -73.255494 64.122760 65.410014 61.727130 -54.749085 69.571316 -28.466209 -7.051323 -173.560050 -32.084602 93.625912 154.902848 30.962912 72.477716 60.858977 -15.427797 -95.735065 0.000000 0.000000 0.000000 -65.220301 21.673119 12.931454 154.542373 18.557170 40.088244 -132.731502 -16.614841 -26.359570 -72.824962 55.884992 -93.010269 0.000000 0.000000 0.000000 +-30116.599410 826.657404 -26547.082085 -39.514079 10.928523 -57.982947 110.253076 76.695197 39.542426 84.297477 -47.829092 133.531892 -83.089546 41.600827 84.516427 -144.567106 -53.444031 -87.922459 60.751696 64.179749 -0.680538 95.074038 
-46.536764 58.701967 -0.530359 64.218441 12.177390 -163.083616 -34.311902 68.349204 -73.319418 68.369814 62.657076 91.562987 59.857457 -56.374878 -89.770318 -16.607380 89.620460 153.336970 -9.464112 -54.460709 76.562814 63.377763 -11.388257 76.844743 -11.518080 -20.932089 -157.479801 -40.411673 124.948915 112.513558 48.404313 70.488955 42.546067 56.387045 -71.633124 0.000000 0.000000 0.000000 -77.263920 5.212392 25.035316 145.927716 15.023154 -1.874082 -122.547642 -28.930278 -15.180304 108.493345 45.764340 74.607434 0.000000 0.000000 0.000000 +-30105.415779 565.370600 -27148.858455 -177.739293 -23.394414 -22.931070 119.153822 76.246138 23.430544 100.000122 -78.222870 120.143626 -69.241596 29.173016 93.932397 169.723918 -20.214092 -22.213046 72.581201 61.829649 10.565863 111.051061 -66.972744 -25.808326 -84.528784 35.659150 -52.716751 128.687272 -27.721583 -14.950669 -77.283194 77.872263 40.424804 95.338493 64.321424 -49.358986 91.777180 -24.916666 -81.075570 -150.124510 -64.753046 -17.812143 65.380431 73.485942 5.588271 68.732547 -9.111467 -12.011672 -145.095457 -29.760394 103.997724 142.706163 26.423914 78.012116 72.940194 70.051478 -35.315923 0.000000 0.000000 0.000000 -80.006911 14.317662 45.487727 126.499819 17.385026 -34.457272 -119.787852 -14.526322 -32.112992 129.335234 69.582175 77.562853 0.000000 0.000000 0.000000 +-30704.714415 189.781290 -27469.355693 109.314699 58.446659 149.701224 138.410660 75.603368 48.334999 -112.583764 69.867281 -54.513152 45.686204 84.898111 -124.339486 -85.646253 -14.335291 173.808523 72.381008 71.822931 40.690668 137.503066 -62.195689 -26.540834 -90.765933 9.342215 -71.354933 152.083726 31.250131 29.196763 -153.948737 88.675086 -10.807131 100.403774 60.473355 -48.625655 -124.189807 84.848050 56.050878 158.809171 62.379818 -117.627407 -71.981678 82.419118 -102.429878 72.108005 -11.192504 -8.613676 137.991518 38.181562 78.765150 -168.406021 -7.481787 88.750561 165.645204 65.586189 14.561587 0.000000 0.000000 0.000000 -83.299159 6.457292 47.538321 117.598580 34.314205 -49.838897 -122.257638 -28.310179 -47.655228 123.559223 5.648410 78.094334 0.000000 0.000000 0.000000 +-31136.999740 -277.878759 -26774.823238 58.296979 -37.580454 25.127870 155.574772 78.033613 65.735532 89.095731 67.280741 141.957671 35.880644 64.595317 -108.576815 -12.271198 -30.445259 118.022495 -87.313305 -76.848082 -57.814043 116.971271 -77.027740 -29.933771 87.756606 -74.911415 90.599770 135.362122 -85.823947 44.213461 128.223115 -54.185080 -50.346359 98.485557 49.707984 1.197269 83.577888 80.816397 -93.177291 -100.731737 14.277061 8.846035 -88.945446 -38.246013 11.932700 127.334495 20.820737 -71.906185 146.585956 -49.274081 52.094936 -72.591804 -45.262287 18.612925 -105.940232 -10.628931 58.246079 0.000000 0.000000 0.000000 -119.274272 -19.334910 30.612414 -71.153392 16.798856 90.324484 -107.444250 -1.340622 -64.094352 -118.875641 -34.326046 -2.398310 0.000000 0.000000 0.000000 +-30622.898336 -404.597998 -26738.720290 131.943128 68.973567 -179.407068 166.202655 74.851012 69.155821 -128.823728 -81.853759 -21.441763 82.355492 72.795875 -28.544897 -17.984138 -15.662968 70.660487 -96.680092 -72.338967 -37.893793 108.947804 -80.915105 2.726247 94.846776 -32.361944 77.219718 125.821621 -29.461831 36.337874 92.925167 -62.816478 -1.547197 100.434379 14.628870 53.981768 -101.743981 47.030268 -29.236735 -140.512618 67.489836 -7.882169 -108.124385 -29.357816 -29.409901 121.176856 15.491930 -69.872939 -106.140004 -34.190756 -62.076786 -85.616794 18.353755 63.110084 105.365352 20.653368 -91.617194 
0.000000 0.000000 0.000000 -98.491308 -19.977875 -22.822281 -64.885919 49.769067 -63.105462 -142.516391 -30.978666 -45.212481 -120.133075 -43.550006 -12.710750 0.000000 0.000000 0.000000 diff --git a/recover_visualize.py b/recover_visualize.py new file mode 100644 index 0000000000000000000000000000000000000000..09d72c235be7a0f3c2910fdefe2541e42f223975 --- /dev/null +++ b/recover_visualize.py @@ -0,0 +1,183 @@ +# representation: 272 dim +# :2 local xz velocities of root, no heading, can recover translation +# 2:8 heading angular velocities, 6d rotation, can recover heading +# 8:8+3*njoint local position, no heading, all at xz origin +# 8+3*njoint:8+6*njoint local velocities, no heading, all at xz origin, can recover local position +# 8+6*njoint:8+12*njoint local rotations, 6d rotation, no heading, all frames z+ + +import numpy as np +from utils.face_z_align_util import rotation_6d_to_matrix, matrix_to_axis_angle +import copy +import torch +import os +import visualization.plot_3d_global as plot_3d +import argparse +import tqdm + +def findAllFile(base, endswith='.npy'): + file_path = [] + for root, ds, fs in os.walk(base, followlinks=True): + for f in fs: + fullname = os.path.join(root, f) + if fullname.endswith(endswith): + file_path.append(fullname) + return file_path + +def rot_yaw(yaw): + cs = np.cos(yaw) + sn = np.sin(yaw) + return np.array([[cs,0,sn],[0,1,0],[-sn,0,cs]]) + + +def my_quat_rotate(q, v): + shape = q.shape + q_w = q[:, -1] + q_vec = q[:, :3] + a = v * (2.0 * q_w ** 2 - 1.0).unsqueeze(-1) + b = torch.cross(q_vec, v, dim=-1) * q_w.unsqueeze(-1) * 2.0 + c = q_vec * \ + torch.bmm(q_vec.view(shape[0], 1, 3), v.view( + shape[0], 3, 1)).squeeze(-1) * 2.0 + return a + b + c + + +def calc_heading(q): + ref_dir = torch.zeros_like(q[..., 0:3]) + ref_dir[..., 2] = 1 + rot_dir = my_quat_rotate(q, ref_dir) + + heading = torch.atan2(rot_dir[..., 0], rot_dir[..., 2]) + return heading + + +def calc_heading_quat_inv(q): + heading = calc_heading(q) + axis = torch.zeros_like(q[..., 0:3]) + axis[..., 1] = 1 + + return -heading, axis + +def accumulate_rotations(relative_rotations): + """Accumulate relative rotations to get the overall rotation""" + # Initial rotation is the first relative rotation matrix + R_total = [relative_rotations[0]] + # Iterate through the remaining relative rotations, accumulating them + for R_rel in relative_rotations[1:]: + R_total.append(np.matmul(R_rel, R_total[-1])) + + return np.array(R_total) + +def recover_from_local_position(final_x, njoint): + # takes positions_no_heading: local positions at the xz origin, no heading + # velocities_root_xy_no_heading: to recover translation + # global_heading_diff_rot: to recover root rotation + nfrm, _ = final_x.shape + positions_no_heading = final_x[:,8:8+3*njoint].reshape(nfrm, -1, 3) # frames, njoints * 3 + velocities_root_xy_no_heading = final_x[:,:2] # frames, 2 + global_heading_diff_rot = final_x[:,2:8] # frames, 6 + + # recover global heading + global_heading_rot = accumulate_rotations(rotation_6d_to_matrix(torch.from_numpy(global_heading_diff_rot)).numpy()) + inv_global_heading_rot = np.transpose(global_heading_rot, (0, 2, 1)) + # add the global heading back to the positions + positions_with_heading = np.matmul(np.repeat(inv_global_heading_rot[:, None,:, :], njoint, axis=1), positions_no_heading[...,None]).squeeze(-1) + + # recover root translation + # add the heading back to velocities_root_xy_no_heading + + velocities_root_xyz_no_heading = np.zeros((velocities_root_xy_no_heading.shape[0], 3)) + velocities_root_xyz_no_heading[:, 0] = velocities_root_xy_no_heading[:, 0] +
velocities_root_xyz_no_heading[:, 2] = velocities_root_xy_no_heading[:, 1] + velocities_root_xyz_no_heading[1:, :] = np.matmul(inv_global_heading_rot[:-1], velocities_root_xyz_no_heading[1:, :,None]).squeeze(-1) + + root_translation = np.cumsum(velocities_root_xyz_no_heading, axis=0) + + # add root translation + positions_with_heading[:, :, 0] += root_translation[:, 0:1] + positions_with_heading[:, :, 2] += root_translation[:, 2:] + + return positions_with_heading + + +# add hip height to the translation when recovering from rotation +def recover_from_local_rotation(final_x, njoint): + nfrm, _ = final_x.shape + rotations_matrix = rotation_6d_to_matrix(torch.from_numpy(final_x[:,8+6*njoint:8+12*njoint]).reshape(nfrm, -1, 6)).numpy() + global_heading_diff_rot = final_x[:,2:8] + velocities_root_xy_no_heading = final_x[:,:2] + positions_no_heading = final_x[:, 8:8+3*njoint].reshape(nfrm, -1, 3) + height = positions_no_heading[:, 0, 1] + + global_heading_rot = accumulate_rotations(rotation_6d_to_matrix(torch.from_numpy(global_heading_diff_rot)).numpy()) + inv_global_heading_rot = np.transpose(global_heading_rot, (0, 2, 1)) + # recover root rotation + rotations_matrix[:,0,...] = np.matmul(inv_global_heading_rot, rotations_matrix[:,0,...]) + velocities_root_xyz_no_heading = np.zeros((velocities_root_xy_no_heading.shape[0], 3)) + velocities_root_xyz_no_heading[:, 0] = velocities_root_xy_no_heading[:, 0] + velocities_root_xyz_no_heading[:, 2] = velocities_root_xy_no_heading[:, 1] + velocities_root_xyz_no_heading[1:, :] = np.matmul(inv_global_heading_rot[:-1], velocities_root_xyz_no_heading[1:, :,None]).squeeze(-1) + root_translation = np.cumsum(velocities_root_xyz_no_heading, axis=0) + root_translation[:, 1] = height + smpl_85 = rotations_matrix_to_smpl85(rotations_matrix, root_translation) + return smpl_85 + +def rotations_matrix_to_smpl85(rotations_matrix, translation): + nfrm, njoint, _, _ = rotations_matrix.shape + axis_angle = matrix_to_axis_angle(torch.from_numpy(rotations_matrix)).numpy().reshape(nfrm, -1) + smpl_85 = np.concatenate([axis_angle, np.zeros((nfrm, 6)), translation, np.zeros((nfrm, 10))], axis=-1) + return smpl_85 + + + + +def smpl85_2_smpl322(smpl_85_data): + result = np.concatenate((smpl_85_data[:,:66], np.zeros((smpl_85_data.shape[0], 90)), np.zeros((smpl_85_data.shape[0], 3)), np.zeros((smpl_85_data.shape[0], 50)), np.zeros((smpl_85_data.shape[0], 100)), smpl_85_data[:,72:72+3], smpl_85_data[:,75:]), axis=-1) + return result + +def visualize_smpl_85(data, title=None, output_path='visualize_result', name='', fps=60): + # data: torch.Size([nframe, 85]) + smpl_85_data = data + if len(smpl_85_data.shape) == 3: + smpl_85_data = np.squeeze(smpl_85_data, axis=0) + + smpl_85_data = smpl85_2_smpl322(smpl_85_data) + vert, joints, motion, faces = process_smplx_data(smpl_85_data, norm_global_orient=False, transform=False) + xyz = joints[:, :22, :].reshape(1, -1, 22, 3).detach().cpu().numpy() + os.makedirs(os.path.dirname(output_path), exist_ok=True) + pose_vis = plot_3d.draw_to_batch(xyz, title_batch=title, outname=[f'{output_path}/rot_{name}.mp4'], fps=fps) + return output_path + + +def visualize_pos_xyz(xyz, title_batch=None, output_path='./', name='', fps=60): + # xyz: torch.Size([bs, nframe, 22, 3]) + xyz = xyz[:1] + bs, seq = xyz.shape[:2] + xyz = xyz.reshape(bs, seq, -1, 3) + os.makedirs(os.path.dirname(output_path), exist_ok=True) + plot_xyz = plot_3d.draw_to_batch(xyz, title_batch, [f'{output_path}/pos_{name}.mp4'], fps=fps) + return output_path + + +if __name__ == '__main__': + njoint
= 22 + parser = argparse.ArgumentParser(description='Visualize new representation.') + parser.add_argument('--input_dir', type=str, required=True, help='Input path') + parser.add_argument('--mode', type=str, required=True, default='rot', choices=['rot', 'pos'], help='Recover from rotation or position') + parser.add_argument('--output_dir', type=str, required=True, help='Output path') + args = parser.parse_args() + os.makedirs(args.output_dir, exist_ok=True) + + for data_path in tqdm.tqdm(findAllFile(args.input_dir, endswith='.npy')): + data_272 = np.load(data_path) + if args.mode == 'rot': + # recover from rotation + from visualization.smplx2joints import process_smplx_data + global_rotation = recover_from_local_rotation(data_272, njoint) # get the 85-dim smpl data + visualize_smpl_85(global_rotation, output_path=args.output_dir, name=data_path.split('/')[-1].split('.')[0]) + print(f"Visualized results are saved in {args.output_dir}") + else: + # recover from position + global_position = recover_from_local_position(data_272, njoint) + global_position = np.expand_dims(global_position, axis=0) + visualize_pos_xyz(global_position, output_path=args.output_dir, name=data_path.split('/')[-1].split('.')[0]) + print(f"Visualized results are saved in {args.output_dir}") diff --git a/representation_272.py b/representation_272.py new file mode 100644 index 0000000000000000000000000000000000000000..4f95911d0aa5611d8c268a9de0e6a063bd649cee --- /dev/null +++ b/representation_272.py @@ -0,0 +1,121 @@ +# representation: 272 dim +# :2 local xz velocities of root, no heading, can recover translation +# 2:8 heading angular velocities, 6d rotation, can recover heading +# 8:8+3*njoint local position, no heading, all at xz origin +# 8+3*njoint:8+6*njoint local velocities, no heading, all at xz origin, can recover local position +# 8+6*njoint:8+12*njoint local rotations, 6d rotation, no heading, all frames z+ + +import numpy as np +from utils.face_z_align_util import expmap_to_quaternion, quaternion_to_matrix, quaternion_to_matrix_np, matrix_to_rotation_6d, qrot_np, rotation_6d_to_matrix, matrix_to_axis_angle +import copy +import torch +import scipy.ndimage as ndimage +from tqdm import tqdm +import os +import argparse + +def findAllFile(base): + file_path = [] + for root, ds, fs in os.walk(base, followlinks=True): + for f in fs: + fullname = os.path.join(root, f) + file_path.append(fullname) + return file_path + +def rot_yaw(yaw): + cs = np.cos(yaw) + sn = np.sin(yaw) + return np.array([[cs,0,sn],[0,1,0],[-sn,0,cs]]) + +def foot_detect(global_positions, thres): + """ + derived from https://github.com/orangeduck/Motion-Matching/blob/37df18afc44e8acca3af5e85dff96effa6a34b03/resources/generate_database.py#L160 + """ + left_foot = 10 + right_foot = 11 + global_velocities = global_positions[1:] - global_positions[:-1] + contact_velocities = np.sqrt(np.sum(global_velocities[:, np.array([left_foot, right_foot])]**2, axis=-1)) + contacts = contact_velocities < thres + # Median filter here acts as a kind of "majority vote", and removes + # small regions where contact is either active or inactive + for ci in range(contacts.shape[1]): + contacts[:,ci] = ndimage.median_filter( + contacts[:,ci], + size=6, + mode='nearest') + return contacts + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Process some paths.') + parser.add_argument('--filedir', type=str, required=True, help='Input directory path') + args = parser.parse_args() + + bad_cnt = 0 + for file in
tqdm(findAllFile(os.path.join(args.filedir, 'smpl_85_face_z_transform_joints'))): + output_file = file.replace('smpl_85_face_z_transform_joints', 'Representation_272') + os.makedirs(os.path.dirname(output_file), exist_ok=True) + root_idx = 0 + # get joint positions + position_data = np.load(file) + position_data = position_data[:, :22, :3] + nfrm, njoint, _ = position_data.shape + # get smpl rotations + rotation_smpl_axis_angle = np.load(file.replace('smpl_85_face_z_transform_joints', 'smpl_85_face_z_transform')) + rotations_wxyz = expmap_to_quaternion(rotation_smpl_axis_angle[:, :66].reshape(nfrm, njoint, 3)) + + rotations_matrix = quaternion_to_matrix_np(rotations_wxyz) # nframe, njoint, 3, 3 + + # put the motion on the floor and put the first frame's root at the origin + ori = copy.deepcopy(position_data[0,root_idx]) # first frame root position + y_min = np.min(position_data[:,:,1]) + ori[1] = y_min + position_data = position_data - ori + velocities_root = position_data[1:,root_idx,:] - position_data[:-1,root_idx,:] + + # SMPL units are meters while the 0.15 threshold is given in cm (hence the /100); the value may need tuning for other datasets + contacts = foot_detect(position_data, 0.15/100) # note: contacts are computed here but not stored in the 272-dim vector below + + # calculate local position, all frames on xz origin + position_data[:,:,0] -= position_data[:,0:1,0] + position_data[:,:,2] -= position_data[:,0:1,2] + + # calculate heading + global_heading = - np.arctan2(rotations_matrix[:,root_idx,0,2], rotations_matrix[:, root_idx, 2,2]) + global_heading_rot = np.array([rot_yaw(x) for x in global_heading]) + global_heading_diff = global_heading[1:] - global_heading[:-1] + global_heading_diff_rot = np.array([rot_yaw(x) for x in global_heading_diff]) + + # calculate positions no heading + positions_no_heading = np.matmul(np.repeat(global_heading_rot[:, None,:, :], njoint, axis=1), position_data[...,None]).squeeze(-1) + + # calculate velocity no heading + velocities_no_heading = positions_no_heading[1:] - positions_no_heading[:-1] + + # calculate root velocity_xz_no_heading + velocities_root_xy_no_heading = np.matmul(global_heading_rot[:-1], velocities_root[:, :, None]).squeeze()[...,[0,2]] + + # calculate rotations no heading + rotations_matrix[:,0,...]
= np.matmul(global_heading_rot, rotations_matrix[:,0,...]) + + # concat all + size_frame = 8+njoint*3+njoint*3+njoint*6 + final_x = np.zeros((nfrm, size_frame)) + + # set the first frame of the root rotation to identity + final_x[0, 2] = 1 + final_x[0, 6] = 1 + try: + final_x[1:,2:8] = matrix_to_rotation_6d(torch.from_numpy(global_heading_diff_rot)).numpy() # take 6D rotation + except: + bad_cnt += 1 + continue + final_x[1:,:2] = velocities_root_xy_no_heading + final_x[:,8:8+3*njoint] = np.reshape(positions_no_heading, (nfrm,-1)) + final_x[1:,8+3*njoint:8+6*njoint] = np.reshape(velocities_no_heading, (nfrm-1,-1)) + final_x[:,8+6*njoint:8+12*njoint] = np.reshape(rotations_matrix[..., :, :2, :], (nfrm,-1)) # take 6D rotation + np.save(output_file, final_x) + print(f"bad_cnt: {bad_cnt}") + print(f"Processed files are saved in {args.filedir}/Representation_272") + + diff --git a/representation_272_to_bvh.py b/representation_272_to_bvh.py new file mode 100644 index 0000000000000000000000000000000000000000..34a2d9c1e19d2c6f9fd6636c350d638ce89d357f --- /dev/null +++ b/representation_272_to_bvh.py @@ -0,0 +1,260 @@ +import torch +import numpy as np +import argparse +import pickle +import smplx + +from utils import bvh, quat +from utils.face_z_align_util import rotation_6d_to_matrix, matrix_to_axis_angle +from tqdm import tqdm +import os + +def findAllFile(base): + file_path = [] + for root, ds, fs in os.walk(base, followlinks=True): + for f in fs: + fullname = os.path.join(root, f) + file_path.append(fullname) + return file_path + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--model_path", type=str, default="body_models/human_model_files") + parser.add_argument("--model_type", type=str, default="smpl", choices=["smpl", "smplx"]) + parser.add_argument("--gender", type=str, default="NEUTRAL", choices=["MALE", "FEMALE", "NEUTRAL"]) + parser.add_argument("--num_betas", type=int, default=10, choices=[10, 300]) + parser.add_argument("--poses", type=str, default="./output/Representation_272") + parser.add_argument("--fps", type=int, default=60) + parser.add_argument("--output", type=str, default="./output/Representation_272") + parser.add_argument("--mirror", action="store_true") + parser.add_argument("--is_folder", action="store_true") + return parser.parse_args() + +def axis_angle_to_quaternion(axis_angle): + """ + Convert rotations given as axis/angle to quaternions. + Args: + axis_angle: Rotations given as a vector in axis angle form, + as a tensor of shape (..., 3), where the magnitude is + the angle turned anticlockwise in radians around the + vector's direction. + Returns: + quaternions with real part first, as tensor of shape (..., 4). 
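+ Example (for illustration): the axis-angle vector [0, pi/2, 0], i.e. a + rotation of pi/2 about the +Y axis, maps to the quaternion + [cos(pi/4), 0, sin(pi/4), 0] ~= [0.7071, 0, 0.7071, 0].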
+ """ + angles = torch.norm(axis_angle, p=2, dim=-1, keepdim=True) + half_angles = 0.5 * angles + eps = 1e-6 + small_angles = angles.abs() < eps + sin_half_angles_over_angles = torch.empty_like(angles) + sin_half_angles_over_angles[~small_angles] = ( + torch.sin(half_angles[~small_angles]) / angles[~small_angles] + ) + # for x small, sin(x/2) is about x/2 - (x/2)^3/6 + # so sin(x/2)/x is about 1/2 - (x*x)/48 + sin_half_angles_over_angles[small_angles] = ( + 0.5 - (angles[small_angles] * angles[small_angles]) / 48 + ) + quaternions = torch.cat( + [torch.cos(half_angles), axis_angle * sin_half_angles_over_angles], dim=-1 + ) + return quaternions + +def mirror_rot_trans(lrot, trans, names, parents): + joints_mirror = np.array([( + names.index("Left"+n[5:]) if n.startswith("Right") else ( + names.index("Right"+n[4:]) if n.startswith("Left") else + names.index(n))) for n in names]) + + mirror_pos = np.array([-1, 1, 1]) + mirror_rot = np.array([1, 1, -1, -1]) + grot = quat.fk_rot(lrot, parents) + trans_mirror = mirror_pos * trans + grot_mirror = mirror_rot * grot[:,joints_mirror] + + return quat.ik_rot(grot_mirror, parents), trans_mirror + + +def accumulate_rotations(relative_rotations): + """Accumulate relative rotations to get overall rotation""" + # Initial rotation is rotation matrix + R_total = [relative_rotations[0]] + # Iterate through all relative rotations, accumulating them step by step + for R_rel in relative_rotations[1:]: + R_total.append(np.matmul(R_rel, R_total[-1])) + + return np.array(R_total) + +def rotations_matrix_to_smplx85(rotations_matrix, translation): + + nfrm, njoint, _, _ = rotations_matrix.shape + axis_angle = matrix_to_axis_angle(torch.from_numpy(rotations_matrix)).numpy().reshape(nfrm, -1) + smplx_85 = np.concatenate([axis_angle, np.zeros((nfrm, 6)), translation, np.zeros((nfrm, 10))], axis=-1) + return smplx_85 + + +def recover_from_local_rotation(final_x, njoint): + # take rotations_matrix: + + nfrm, _ = final_x.shape + rotations_matrix = rotation_6d_to_matrix(torch.from_numpy(final_x[:,8+6*njoint:8+12*njoint]).reshape(nfrm, -1, 6)).numpy() + global_heading_diff_rot = final_x[:,2:8] + velocities_root_xy_no_heading = final_x[:,:2] + positions_no_heading = final_x[:, 8:8+3*njoint].reshape(nfrm, -1, 3) + height = positions_no_heading[:, 0, 1] + + global_heading_rot = accumulate_rotations(rotation_6d_to_matrix(torch.from_numpy(global_heading_diff_rot)).numpy()) + inv_global_heading_rot = np.transpose(global_heading_rot, (0, 2, 1)) + # recover root rotation + + rotations_matrix[:,0,...] = np.matmul(inv_global_heading_rot, rotations_matrix[:,0,...]) + + velocities_root_xyz_no_heading = np.zeros((velocities_root_xy_no_heading.shape[0], 3)) + velocities_root_xyz_no_heading[:, 0] = velocities_root_xy_no_heading[:, 0] + velocities_root_xyz_no_heading[:, 2] = velocities_root_xy_no_heading[:, 1] + velocities_root_xyz_no_heading[1:, :] = np.matmul(inv_global_heading_rot[:-1], velocities_root_xyz_no_heading[1:, :,None]).squeeze(-1) + root_translation = np.cumsum(velocities_root_xyz_no_heading, axis=0) + root_translation[:, 1] = height + smplx_85 = rotations_matrix_to_smplx85(rotations_matrix, root_translation) + return smplx_85 + + +def smpl2bvh(model_path:str, poses:str, output:str, mirror:bool, + model_type="smpl", gender="MALE", + num_betas=10, fps=60) -> None: + """Save bvh file created by smpl parameters. + + Args: + model_path (str): Path to smpl models. + poses (str): Path to npz or pkl file. + output (str): Where to save bvh. 
+ mirror (bool): Whether to additionally save the mirrored motion. + model_type (str, optional): Only "smpl" is prepared. Defaults to "smpl". + gender (str, optional): Gender information. Defaults to "MALE". + num_betas (int, optional): How many PCA shape parameters (betas) to use in SMPL. Defaults to 10. + fps (int, optional): Frames per second. Defaults to 60. + """ + + names = [ + "Pelvis", + "Left_hip", + "Right_hip", + "Spine1", + "Left_knee", + "Right_knee", + "Spine2", + "Left_ankle", + "Right_ankle", + "Spine3", + "Left_foot", + "Right_foot", + "Neck", + "Left_collar", + "Right_collar", + "Head", + "Left_shoulder", + "Right_shoulder", + "Left_elbow", + "Right_elbow", + "Left_wrist", + "Right_wrist", + "Left_palm", + "Right_palm", + ] + + model = smplx.create(model_path=model_path, + model_type=model_type, + gender=gender, + batch_size=1) + + parents = model.parents.detach().cpu().numpy() + # You can define betas like this (default betas are all 0). + rest = model( + # betas = torch.randn([1, num_betas], dtype=torch.float32) + ) + rest_pose = rest.joints.detach().cpu().numpy().squeeze()[:24,:] + + root_offset = rest_pose[0] + offsets = rest_pose - rest_pose[parents] + offsets[0] = root_offset + offsets *= 1 # placeholder for unit scaling + + + scaling = None + + + poses = np.load(poses) + assert poses.shape[-1] == 272 + + poses = recover_from_local_rotation(poses, 22) + assert poses.shape[-1] == 85 + + rots = poses[:, :72].reshape(-1, 24, 3) + trans = poses[:, 72:75] + + + if scaling is not None: + trans /= scaling + + + # to quaternion + rots = axis_angle_to_quaternion(torch.from_numpy(rots)).numpy() + order = "zyx" + pos = offsets[None].repeat(len(rots), axis=0) + positions = pos.copy() + positions[:,0] += trans + rotations = np.degrees(quat.to_euler(rots, order=order)) + + bvh_data ={ + "rotations": rotations, + "positions": positions, + "offsets": offsets, + "parents": parents, + "names": names, + "order": order, + "frametime": 1 / fps, + } + + if not output.endswith(".bvh"): + output = output + ".bvh" + + os.makedirs(os.path.dirname(output), exist_ok=True) + bvh.save(output, bvh_data) + + if mirror: + rots_mirror, trans_mirror = mirror_rot_trans( + rots, trans, names, parents) + positions_mirror = pos.copy() + positions_mirror[:,0] += trans_mirror + rotations_mirror = np.degrees( + quat.to_euler(rots_mirror, order=order)) + + bvh_data ={ + "rotations": rotations_mirror, + "positions": positions_mirror, + "offsets": offsets, + "parents": parents, + "names": names, + "order": order, + "frametime": 1 / fps, + } + + output_mirror = output.split(".")[0] + "_mirror.bvh" + bvh.save(output_mirror, bvh_data) + +if __name__ == "__main__": + args = parse_args() + if args.is_folder: + for file in tqdm(findAllFile(args.poses)): + if file.endswith(".npy"): + smpl2bvh(model_path=args.model_path, model_type=args.model_type, + mirror = args.mirror, gender=args.gender, + poses=file, num_betas=args.num_betas, + fps=args.fps, output=file.replace(args.poses, args.output).replace(".npy", ".bvh")) + else: + smpl2bvh(model_path=args.model_path, model_type=args.model_type, + mirror = args.mirror, gender=args.gender, + poses=args.poses, num_betas=args.num_betas, + fps=args.fps, output=args.output) + + print(f"Processed BVH file is saved in {args.output}") diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..6e4c4c87a349469d25db48c47a59d87ef9cd2926 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,10 @@ +smplx==0.1.28 +transformers +timm==1.0.12 +sentence-transformers
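+# NOTE: torch, accelerate, tensorboard and tqdm are imported by the training +# scripts but are not pinned here; they are assumed to be installed separately.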
+clip @ git+https://github.com/openai/CLIP.git@main#egg=clip +human-body-prior @ git+https://github.com/nghorbani/human_body_prior.git@master#egg=human-body-prior +gdown +chumpy==0.70 +scipy==1.7.1 +numpy==1.22.4 \ No newline at end of file diff --git a/train_causal_TAE.py b/train_causal_TAE.py new file mode 100644 index 0000000000000000000000000000000000000000..6efe4b8c903d000fad343d8ebebad5623c199aef --- /dev/null +++ b/train_causal_TAE.py @@ -0,0 +1,181 @@ +import os +import json +import numpy as np +import torch +import torch.optim as optim +from torch.utils.tensorboard import SummaryWriter +from accelerate import Accelerator +import models.tae as tae +import utils.losses as losses +import options.option_tae as option_tae +import utils.utils_model as utils_model +from humanml3d_272 import dataset_tae, dataset_eval_tae +import utils.eval_trans as eval_trans +import warnings +warnings.filterwarnings('ignore') + + +##### ---- Accelerator Setup ---- ##### +accelerator = Accelerator() +comp_device = accelerator.device +def update_lr_warm_up(optimizer, nb_iter, warm_up_iter, lr): + current_lr = lr * (nb_iter + 1) / (warm_up_iter + 1) + for param_group in optimizer.param_groups: + param_group["lr"] = current_lr + return optimizer, current_lr + +##### ---- Exp dirs ---- ##### +args = option_tae.get_args_parser() +torch.manual_seed(args.seed) + +args.out_dir = os.path.join(args.out_dir, f'{args.exp_name}') +os.makedirs(args.out_dir, exist_ok = True) + +##### ---- Logger ---- ##### +logger = utils_model.get_logger(args.out_dir) +writer = SummaryWriter(args.out_dir) +logger.info(json.dumps(vars(args), indent=4, sort_keys=True)) + +logger.info(f'Training on {args.dataname}, motions are with {args.nb_joints} joints') + + + +##### ---- Dataloader ---- ##### +train_loader = dataset_tae.DATALoader(args.dataname, + args.batch_size, + window_size=args.window_size, + unit_length=2**args.down_t) + +val_loader = dataset_eval_tae.DATALoader(args.dataname, False, + 32, + unit_length=2**args.down_t) + +##### ---- Network ---- ##### +clip_range = [-30,20] + +net = tae.Causal_HumanTAE( + hidden_size=args.hidden_size, + down_t=args.down_t, + stride_t=args.stride_t, + depth=args.depth, + dilation_growth_rate=args.dilation_growth_rate, + activation='relu', + latent_dim=args.latent_dim, + clip_range=clip_range + ) + + +if args.resume_pth : + logger.info('loading checkpoint from {}'.format(args.resume_pth)) + ckpt = torch.load(args.resume_pth, map_location='cpu') + net.load_state_dict(ckpt, strict=True) +net.train() +net.to(comp_device) + +##### ---- Optimizer & Scheduler ---- ##### +optimizer = optim.AdamW(net.parameters(), lr=args.lr, betas=(0.9, 0.99), weight_decay=args.weight_decay) +scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.lr_scheduler, gamma=args.gamma) + +net, optimizer, train_loader, val_loader = accelerator.prepare(net, optimizer, train_loader, val_loader) +train_loader_iter = dataset_tae.cycle(train_loader) + +Loss = losses.ReConsLoss(motion_dim=272) + +##### ------ warm-up ------- ##### +avg_recons, avg_kl, avg_root = 0., 0., 0. 
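+# update_lr_warm_up ramps the learning rate linearly: at iteration k the LR is +# lr * (k + 1) / (warm_up_iter + 1). For example (illustrative numbers, not the +# shipped defaults): with lr = 2e-4 and warm_up_iter = 1000, iteration 499 runs +# at 2e-4 * 500 / 1001 ~= 1e-4, approaching the full 2e-4 by the end of warm-up.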
+for nb_iter in range(1, args.warm_up_iter): + + optimizer, current_lr = update_lr_warm_up(optimizer, nb_iter, args.warm_up_iter, args.lr) + + gt_motion = next(train_loader_iter) + gt_motion = gt_motion.to(comp_device).float() + + if args.num_gpus > 1: + pred_motion, mu, logvar = net.module(gt_motion) + else: + pred_motion, mu, logvar = net(gt_motion) + + loss_motion = Loss(pred_motion, gt_motion) + + loss_kl = Loss.forward_KL(mu, logvar) + loss_root = Loss.forward_root(pred_motion, gt_motion) + loss = loss_motion + loss_kl + args.root_loss * loss_root + + optimizer.zero_grad() + accelerator.backward(loss) + optimizer.step() + + avg_recons += loss_motion.item() + avg_kl += loss_kl.item() + avg_root += loss_root.item() + + if nb_iter % args.print_iter == 0 : + if accelerator.is_main_process: + avg_recons /= args.print_iter + avg_kl /= args.print_iter + avg_root /= args.print_iter + + logger.info(f"Warmup. Iter {nb_iter} : lr {current_lr:.5f} \t Recons. {avg_recons:.5f} \t KL. {avg_kl:.5f} \t Root. {avg_root:.5f}") + + + avg_recons, avg_kl, avg_root = 0., 0., 0. + +##### ---- Training ---- ##### +avg_recons, avg_kl, avg_root = 0., 0., 0. + +if args.num_gpus > 1: + best_iter, best_mpjpe, writer, logger = eval_trans.evaluation_tae_multi(args.out_dir, val_loader, net.module, logger, writer, 0, best_iter=0, best_mpjpe=1000, device=comp_device, accelerator=accelerator) +else: + best_iter, best_mpjpe, writer, logger = eval_trans.evaluation_tae_multi(args.out_dir, val_loader, net, logger, writer, 0, best_iter=0, best_mpjpe=1000, device=comp_device, accelerator=accelerator) + +for nb_iter in range(1, args.total_iter + 1): + + gt_motion = next(train_loader_iter) + gt_motion = gt_motion.to(comp_device).float() + + if args.num_gpus > 1: + pred_motion, mu, logvar = net.module(gt_motion) + else: + pred_motion, mu, logvar = net(gt_motion) + + loss_motion = Loss(pred_motion, gt_motion) + + loss_kl = Loss.forward_KL(mu, logvar) + + loss_root = Loss.forward_root(pred_motion, gt_motion) + loss = loss_motion + loss_kl + args.root_loss * loss_root + + + optimizer.zero_grad() + accelerator.backward(loss) + optimizer.step() + scheduler.step() + + try: + avg_recons += loss_motion.item() + avg_kl += loss_kl.item() + avg_root += loss_root.item() + except: + continue + + if nb_iter % args.print_iter == 0 : + if accelerator.is_main_process: + avg_recons /= args.print_iter + avg_kl /= args.print_iter + avg_root /= args.print_iter + writer.add_scalar('./Train/Recon_loss', avg_recons, nb_iter) + writer.add_scalar('./Train/KL', avg_kl, nb_iter) + writer.add_scalar('./Train/Root_loss', avg_root, nb_iter) + writer.add_scalar('./Train/LR', optimizer.param_groups[0]['lr'], nb_iter) # read the live LR; current_lr is stale after warm-up + + logger.info(f"Train. Iter {nb_iter} : \t Recons. {avg_recons:.5f} \t KL. {avg_kl:.5f} \t Root. {avg_root:.5f}") + + avg_recons, avg_kl, avg_root = 0., 0., 0.
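+ # Periodic evaluation: evaluation_tae_multi returns an updated + # (best_iter, best_mpjpe) pair, where MPJPE is the mean per-joint position + # error of the reconstruction (lower is better); it is seeded above with a + # sentinel best_mpjpe of 1000 before training starts.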
+ + if nb_iter % args.eval_iter==0: + if args.num_gpus > 1: + best_iter, best_mpjpe, writer, logger = eval_trans.evaluation_tae_multi(args.out_dir, val_loader, net.module, logger, writer, nb_iter, best_iter, best_mpjpe, device=comp_device, accelerator=accelerator) + else: + best_iter, best_mpjpe, writer, logger = eval_trans.evaluation_tae_multi(args.out_dir, val_loader, net, logger, writer, nb_iter, best_iter, best_mpjpe, device=comp_device, accelerator=accelerator) + +accelerator.wait_for_everyone() \ No newline at end of file diff --git a/train_motionstreamer.py b/train_motionstreamer.py new file mode 100644 index 0000000000000000000000000000000000000000..87ec4634d30fc058865972a0ee818ab907a4601f --- /dev/null +++ b/train_motionstreamer.py @@ -0,0 +1,261 @@ +"""Train streaming motion generation model (MotionStreamer) with llama blocks, Two-Forward strategy and QK-Norm, using the motion latents encoded by the Causal TAE (trained in the first stage).""" + + +import os +import torch +import numpy as np +import random +from torch.utils.tensorboard import SummaryWriter +import json +from accelerate import Accelerator +from models.llama_model import LLaMAHF, LLaMAHFConfig +import options.option_transformer as option_trans +import utils.utils_model as utils_model +import warnings +from torch.optim.lr_scheduler import LambdaLR, CosineAnnealingLR +warnings.filterwarnings('ignore') + +os.environ["TOKENIZERS_PARALLELISM"] = "false" +##### ---- Exp dirs ---- ##### +args = option_trans.get_args_parser() +torch.manual_seed(args.seed) + +# warm-up + cosine decay scheduler +class WarmupCosineDecayScheduler: + def __init__(self, optimizer, warmup_iters, total_iters, min_lr=0): + self.optimizer = optimizer + self.warmup_iters = warmup_iters + self.total_iters = total_iters + self.min_lr = min_lr + + self.warmup_scheduler = LambdaLR(optimizer, lr_lambda=self.warmup_lambda) + + self.cosine_scheduler = CosineAnnealingLR(optimizer, + T_max=total_iters - warmup_iters, + eta_min=min_lr) + + def warmup_lambda(self, current_iter): + if current_iter < self.warmup_iters: + return float(current_iter) / float(max(1, self.warmup_iters)) + return 1.0 + + def step(self, current_iter): + if current_iter < self.warmup_iters: + self.warmup_scheduler.step() + else: + self.cosine_scheduler.step() + + def state_dict(self): + return { + 'warmup_iters': self.warmup_iters, + 'total_iters': self.total_iters, + 'min_lr': self.min_lr, + } + + def load_state_dict(self, state_dict): + self.warmup_iters = state_dict['warmup_iters'] + self.total_iters = state_dict['total_iters'] + self.min_lr = state_dict['min_lr'] + + + +args.out_dir = os.path.join(args.out_dir, f'{args.exp_name}') +os.makedirs(args.out_dir, exist_ok = True) + + +##### ---- Accelerator Setup ---- ##### +accelerator = Accelerator() +comp_device = accelerator.device + +##### ---- Logger ---- ##### +logger = utils_model.get_logger(args.out_dir) +writer = SummaryWriter(args.out_dir) +logger.info(json.dumps(vars(args), indent=4, sort_keys=True)) + +##### ---- Dataloader ---- ##### +from humanml3d_272 import dataset_TM_train_motionstreamer +train_loader = dataset_TM_train_motionstreamer.DATALoader(args.dataname, args.batch_size, unit_length=2**args.down_t, latent_dir=args.latent_dir) + + +##### ---- Network ---- ##### +from sentence_transformers import SentenceTransformer +t5_model = SentenceTransformer('sentencet5-xxl/') +t5_model.eval() +for p in t5_model.parameters(): + p.requires_grad = False + + +config = LLaMAHFConfig.from_name('Normal_size') +config.block_size = 78 
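+# block_size caps the number of positions (the text condition plus the motion +# latent sequence) that the llama blocks attend over; 78 is presumably chosen +# to cover the longest latent sequence in the training data.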
+trans_encoder = LLaMAHF(config, args.num_diffusion_head_layers, args.latent_dim, comp_device) + +if args.resume_trans is not None: + print('loading transformer checkpoint from {}'.format(args.resume_trans)) + ckpt = torch.load(args.resume_trans, map_location='cpu') + new_ckpt_trans = {} + for key in ckpt['trans'].keys(): + if key.split('.')[0]=='module': + new_key = '.'.join(key.split('.')[1:]) + else: + new_key = key + new_ckpt_trans[new_key] = ckpt['trans'][key] + trans_encoder.load_state_dict(new_ckpt_trans, strict=True) +trans_encoder.train() +trans_encoder.to(comp_device) + + +##### ---- Optimizer & Scheduler ---- ##### +optimizer = utils_model.initial_optim(args.decay_option, args.lr, args.weight_decay, trans_encoder, args.optimizer) +scheduler = WarmupCosineDecayScheduler(optimizer, args.total_iter//10, args.total_iter) + +t5_model, trans_encoder, optimizer, train_loader = accelerator.prepare(t5_model, trans_encoder, optimizer, train_loader) +train_loader_iter = dataset_TM_train_motionstreamer.cycle(train_loader) + + +diffmlps_batch_mul = 4 +def lengths_to_mask(lengths, max_len): + mask = torch.arange(max_len, device=lengths.device).expand(len(lengths), max_len) < lengths.unsqueeze(1) + return mask +def get_mask_subset_prob(mask, prob): + subset_mask = torch.bernoulli(mask, p=prob) & mask + return subset_mask + + +def uniform(shape, device=None): + return torch.zeros(shape, device=device).float().uniform_(0, 1) + +import math +def cosine_schedule(t): + return torch.cos(t * math.pi * 0.5) + + +#--------------2-forward:------------------ +def cosine_decay(step, total_steps, start_value=1.0, end_value=0.0): + step = torch.tensor(step, dtype=torch.float32) + total_steps = torch.tensor(total_steps, dtype=torch.float32) + cosine_factor = 0.5 * (1 + torch.cos(torch.pi * step / total_steps)) + return start_value + (end_value - start_value) * cosine_factor + +def replace_with_pred(latents, pred_xstart, step, total_steps): + decay_factor = cosine_decay(step, total_steps).to(latents.device) + b, l, d = latents.shape + num_replace = int(l * decay_factor) + + replace_indices = torch.randperm(l)[:num_replace] + + replace_mask = torch.zeros(b, l, dtype=torch.bool).to(latents.device) + replace_mask[:, replace_indices] = 1 + + updated_latents = latents.clone() + updated_latents[replace_mask] = pred_xstart[replace_mask] + + return updated_latents + +def forward_loss_withmask_2_forward_streaming(latents, trans, m_lens, feat_text, step, total_steps, A_token_length): + latents = latents.to(comp_device) + feat_text = feat_text.to(comp_device) + A_token_length = A_token_length.to(comp_device) + conditions = trans(latents, feat_text) + conditions = conditions.contiguous() + z = conditions[:,:-1,:] + + b, l, d = latents.shape + mask = lengths_to_mask(m_lens, l) + + for j in range(b): + mask[j, :A_token_length[j].item()] = False # A_motion token: do not compute loss + + mask = mask.reshape(b * l).repeat(diffmlps_batch_mul) + + target = latents.clone().detach() + target = target.reshape(b * l, -1) + z = z.reshape(b * l, -1) + + with torch.no_grad(): + loss, pred_xstart = trans.diff_loss(target=target, z=z) + + pred_xstart = pred_xstart.clone().detach() + pred_xstart = pred_xstart.reshape(b, l, -1) + + # do not replace A_motion tokens + for k in range(b): + pred_xstart[k, :A_token_length[k].item(),:] = latents[k, :A_token_length[k].item(),:] + + updated_latents = replace_with_pred(latents, pred_xstart, step, total_steps) + updated_conditions = trans(updated_latents, feat_text) + updated_conditions = 
updated_conditions.contiguous() + updated_z = updated_conditions[:,:-1,:] + + updated_target = latents.clone().detach() + + updated_target = updated_target.reshape(b * l, -1).repeat(diffmlps_batch_mul, 1) + updated_z = updated_z.reshape(b * l, -1).repeat(diffmlps_batch_mul, 1) + + updated_target = updated_target[mask] + updated_z = updated_z[mask] + + updated_loss, updated_pred_xstart = trans.diff_loss(target=updated_target, z=updated_z) + + return updated_loss + + +##### ---- Training Loop ---- ##### +nb_iter, avg_loss_cls = 0, 0. + +while nb_iter <= args.total_iter: + batch = next(train_loader_iter) + caption, m_tokens, m_tokens_len, A_token_length = batch + caption = list(caption) + m_tokens, m_tokens_len = m_tokens.to(comp_device), m_tokens_len.to(comp_device) + A_token_length = A_token_length.to(comp_device) + + bs = len(caption) + num_masked = int(bs * 0.1) # 10% + mask_indices = random.sample(range(bs), num_masked) + + for idx in mask_indices: + caption[idx] = '' + + feat_text = torch.from_numpy(t5_model.encode(caption)).float() + feat_text = feat_text.to(comp_device) + + # -------gt-------- + input_latent = m_tokens[:,:-1,:] # continuous token + + loss_cls = 0.0 + + if args.num_gpus > 1: + loss_cls = forward_loss_withmask_2_forward_streaming(latents=input_latent, trans=trans_encoder.module, m_lens = m_tokens_len, feat_text=feat_text, step=nb_iter, total_steps=args.total_iter, A_token_length=A_token_length) + else: + loss_cls = forward_loss_withmask_2_forward_streaming(latents=input_latent, trans=trans_encoder, m_lens = m_tokens_len, feat_text=feat_text, step=nb_iter, total_steps=args.total_iter, A_token_length=A_token_length) + + + # backward & optimizer step + optimizer.zero_grad() + accelerator.backward(loss_cls) + optimizer.step() + scheduler.step(nb_iter) + + avg_loss_cls = avg_loss_cls + loss_cls.item() + + nb_iter += 1 + args.print_iter = 100 + if nb_iter % args.print_iter == 0 : + if accelerator.is_main_process: + avg_loss_cls = avg_loss_cls / args.print_iter + writer.add_scalar('./Loss/train', avg_loss_cls, nb_iter) + writer.add_scalar('./LR/train', optimizer.param_groups[0]['lr'], nb_iter) + msg = f"Train. Iter {nb_iter} : Loss. {avg_loss_cls:.5f}" + logger.info(msg) + avg_loss_cls = 0. 
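# --- Illustrative aside (a minimal sketch, not part of this file): how the
# Two-Forward replacement schedule behaves. With the defaults start_value=1.0,
# end_value=0.0, cosine_decay() actually rises from 0 at step 0 to 1 at the
# final step, so replace_with_pred() feeds the model ground-truth latents early
# in training and mostly its own first-pass predictions late on
# (scheduled-sampling style). Assuming total_steps=10000 for illustration:
for _s in (0, 2500, 5000, 7500, 10000):
    print(_s, round(cosine_decay(_s, 10000).item(), 3))  # 0.0, 0.146, 0.5, 0.854, 1.0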
+ + + args.save_iter = 10000 + if nb_iter % args.save_iter == 0: + # save checkpoint + if accelerator.is_main_process: + torch.save({ + 'trans': trans_encoder.state_dict(), + }, os.path.join(args.out_dir, f'latest.pth')) + +accelerator.wait_for_everyone() diff --git a/train_t2m.py b/train_t2m.py new file mode 100644 index 0000000000000000000000000000000000000000..602bdafaf7a6fe3817e1ed3b42c7bf1f8a161914 --- /dev/null +++ b/train_t2m.py @@ -0,0 +1,261 @@ +"""Train original text to motion generation model with llama blocks, Two-Forward strategy and QK-Norm, using the motion latents encoded by the Causal TAE (trained in the first stage).""" + +import os +import torch +import random +from torch.utils.tensorboard import SummaryWriter +import json +from accelerate import Accelerator + +from models.llama_model import LLaMAHF, LLaMAHFConfig +from humanml3d_272 import dataset_TM_train +import options.option_transformer as option_trans +import utils.utils_model as utils_model +import warnings +from torch.optim.lr_scheduler import LambdaLR, CosineAnnealingLR +warnings.filterwarnings('ignore') + +os.environ["TOKENIZERS_PARALLELISM"] = "false" + +##### ---- Exp dirs ---- ##### +args = option_trans.get_args_parser() +torch.manual_seed(args.seed) + +# warm-up + cosine decay scheduler +class WarmupCosineDecayScheduler: + def __init__(self, optimizer, warmup_iters, total_iters, min_lr=0): + self.optimizer = optimizer + self.warmup_iters = warmup_iters + self.total_iters = total_iters + self.min_lr = min_lr + + self.warmup_scheduler = LambdaLR(optimizer, lr_lambda=self.warmup_lambda) + + self.cosine_scheduler = CosineAnnealingLR(optimizer, + T_max=total_iters - warmup_iters, + eta_min=min_lr) + + def warmup_lambda(self, current_iter): + if current_iter < self.warmup_iters: + return float(current_iter) / float(max(1, self.warmup_iters)) + return 1.0 + + def step(self, current_iter): + if current_iter < self.warmup_iters: + self.warmup_scheduler.step() + else: + self.cosine_scheduler.step() + + def state_dict(self): + return { + 'warmup_iters': self.warmup_iters, + 'total_iters': self.total_iters, + 'min_lr': self.min_lr, + } + + def load_state_dict(self, state_dict): + self.warmup_iters = state_dict['warmup_iters'] + self.total_iters = state_dict['total_iters'] + self.min_lr = state_dict['min_lr'] + + +args.out_dir = os.path.join(args.out_dir, f'{args.exp_name}') +os.makedirs(args.out_dir, exist_ok = True) + + +##### ---- Accelerator Setup ---- ##### +accelerator = Accelerator() +comp_device = accelerator.device + +##### ---- Logger ---- ##### +logger = utils_model.get_logger(args.out_dir) +writer = SummaryWriter(args.out_dir) +logger.info(json.dumps(vars(args), indent=4, sort_keys=True)) + + +##### ---- Dataloader ---- ##### +train_loader = dataset_TM_train.DATALoader(args.dataname, args.batch_size, args.latent_dir, unit_length=2**args.down_t) + + +##### ---- Network ---- ##### +from sentence_transformers import SentenceTransformer +t5_model = SentenceTransformer('sentencet5-xxl/') +t5_model.eval() +for p in t5_model.parameters(): + p.requires_grad = False + + +config = LLaMAHFConfig.from_name('Normal_size') +config.block_size = 78 +trans_encoder = LLaMAHF(config, args.num_diffusion_head_layers, args.latent_dim, comp_device) + +if args.resume_trans is not None: + print('loading transformer checkpoint from {}'.format(args.resume_trans)) + ckpt = torch.load(args.resume_trans, map_location='cpu') + new_ckpt_trans = {} + for key in ckpt['trans'].keys(): + if key.split('.')[0]=='module': + new_key = 
'.'.join(key.split('.')[1:]) + else: + new_key = key + new_ckpt_trans[new_key] = ckpt['trans'][key] + trans_encoder.load_state_dict(new_ckpt_trans, strict=True) +trans_encoder.train() +trans_encoder.to(comp_device) + + +##### ---- Optimizer & Scheduler ---- ##### +optimizer = utils_model.initial_optim(args.decay_option, args.lr, args.weight_decay, trans_encoder, args.optimizer) +scheduler = WarmupCosineDecayScheduler(optimizer, args.total_iter//10, args.total_iter) + + +t5_model, trans_encoder, optimizer, train_loader = accelerator.prepare(t5_model, trans_encoder, optimizer, train_loader) +train_loader_iter = dataset_TM_train.cycle(train_loader) + + +diffmlps_batch_mul = 4 +def lengths_to_mask(lengths, max_len): + mask = torch.arange(max_len, device=lengths.device).expand(len(lengths), max_len) < lengths.unsqueeze(1) + return mask +def get_mask_subset_prob(mask, prob): + subset_mask = torch.bernoulli(mask, p=prob) & mask + return subset_mask + + +def uniform(shape, device=None): + return torch.zeros(shape, device=device).float().uniform_(0, 1) + +import math +def cosine_schedule(t): + return torch.cos(t * math.pi * 0.5) + + +#--------------2-forward:------------------ +def cosine_decay(step, total_steps, start_value=1.0, end_value=0.0): + + step = torch.tensor(step, dtype=torch.float32) + total_steps = torch.tensor(total_steps, dtype=torch.float32) + + cosine_factor = 0.5 * (1 + torch.cos(torch.pi * step / total_steps)) + return start_value + (end_value - start_value) * cosine_factor + +def replace_with_pred(latents, pred_xstart, step, total_steps): + + decay_factor = cosine_decay(step, total_steps).to(latents.device) + + b, l, d = latents.shape + num_replace = int(l * decay_factor) + + replace_indices = torch.randperm(l)[:num_replace] + + replace_mask = torch.zeros(b, l, dtype=torch.bool).to(latents.device) + replace_mask[:, replace_indices] = 1 + + updated_latents = latents.clone() + updated_latents[replace_mask] = pred_xstart[replace_mask] + + return updated_latents + +def forward_loss_withmask_2_forward(latents, trans, m_lens, feat_text, step, total_steps): + """z: condition; latents: gt""" + #--------------First Forward:------------------------- + conditions = trans(latents, feat_text) + conditions = conditions.contiguous() + z = conditions[:,:-1,:] + #------------------------------------------------- + + b, l, d = latents.shape + mask = lengths_to_mask(m_lens, l) + mask = mask.reshape(b * l).repeat(diffmlps_batch_mul) + + target = latents.clone().detach() + target = target.reshape(b * l, -1) + z = z.reshape(b * l, -1) + + with torch.no_grad(): + loss, pred_xstart = trans.diff_loss(target=target, z=z) + + pred_xstart = pred_xstart.clone().detach() + pred_xstart = pred_xstart.reshape(b, l, -1) + + #--------------Second Forward:------------------------- + # Update latents + updated_latents = replace_with_pred(latents, pred_xstart, step, total_steps) + updated_conditions = trans(updated_latents, feat_text) + updated_conditions = updated_conditions.contiguous() + updated_z = updated_conditions[:,:-1,:] + + updated_target = latents.clone().detach() + + updated_target = updated_target.reshape(b * l, -1).repeat(diffmlps_batch_mul, 1) + updated_z = updated_z.reshape(b * l, -1).repeat(diffmlps_batch_mul, 1) + + updated_target = updated_target[mask] + updated_z = updated_z[mask] + + updated_loss, _ = trans.diff_loss(target=updated_target, z=updated_z) + + return updated_loss +#------------------- + +##### ---- Training Loop ---- ##### +nb_iter, avg_loss = 0, 0. 
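# --- Illustrative aside (a minimal sketch, not part of this file): the
# lengths_to_mask() helper above builds the per-position validity mask that
# restricts the diffusion loss to real (non-padded) tokens, e.g.:
print(lengths_to_mask(torch.tensor([2, 4]), 5))
# tensor([[ True,  True, False, False, False],
#         [ True,  True,  True,  True, False]])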
+ +while nb_iter <= args.total_iter: + batch = next(train_loader_iter) + text, m_tokens, m_tokens_len = batch + text = list(text) + m_tokens, m_tokens_len = m_tokens.to(comp_device), m_tokens_len.to(comp_device) + + bs = len(text) + num_masked = int(bs * 0.1) # 10% + mask_indices = random.sample(range(bs), num_masked) + + for idx in mask_indices: + text[idx] = '' + + feat_text = torch.from_numpy(t5_model.encode(text)).float() + feat_text = feat_text.to(comp_device) + + # -------gt-------- + input_latent = m_tokens[:,:-1] # continuous token + loss = 0.0 + + if args.num_gpus > 1: + loss = forward_loss_withmask_2_forward(latents=input_latent, trans=trans_encoder.module, m_lens = m_tokens_len, feat_text=feat_text, step=nb_iter, total_steps=args.total_iter) + else: + loss = forward_loss_withmask_2_forward(latents=input_latent, trans=trans_encoder, m_lens = m_tokens_len, feat_text=feat_text, step=nb_iter, total_steps=args.total_iter) + + + optimizer.zero_grad() + accelerator.backward(loss) + optimizer.step() + scheduler.step(nb_iter) + + avg_loss = avg_loss + loss.item() + + nb_iter += 1 + args.print_iter = 100 + if nb_iter % args.print_iter == 0 : + if accelerator.is_main_process: + avg_loss = avg_loss / args.print_iter + writer.add_scalar('./Loss/train', avg_loss, nb_iter) + writer.add_scalar('./LR/train', optimizer.param_groups[0]['lr'], nb_iter) + msg = f"Train. Iter {nb_iter} : Loss. {avg_loss:.5f}" + logger.info(msg) + avg_loss = 0. + + + args.save_iter = 10000 + if nb_iter % args.save_iter == 0: + # save + if accelerator.is_main_process: + torch.save({ + 'trans': trans_encoder.state_dict(), + 'scheduler': scheduler.state_dict(), + 'optimizer': optimizer.state_dict() + }, os.path.join(args.out_dir, f'latest.pth')) + + + +accelerator.wait_for_everyone() diff --git a/utils/__pycache__/bvh.cpython-312.pyc b/utils/__pycache__/bvh.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..296e6a07128a2239b7f4f5ee7a6910e0052c9e15 Binary files /dev/null and b/utils/__pycache__/bvh.cpython-312.pyc differ diff --git a/utils/__pycache__/bvh.cpython-38.pyc b/utils/__pycache__/bvh.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e22dc191c2945d05e0bd235d755dad4038eeb9c0 Binary files /dev/null and b/utils/__pycache__/bvh.cpython-38.pyc differ diff --git a/utils/__pycache__/bvh.cpython-39.pyc b/utils/__pycache__/bvh.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e252b4fe10dae605b41beb3873b57c0e01c22ceb Binary files /dev/null and b/utils/__pycache__/bvh.cpython-39.pyc differ diff --git a/utils/__pycache__/face_z_align_util.cpython-312.pyc b/utils/__pycache__/face_z_align_util.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3f3d5f7b39aba5460fe2d69aaba6e1656e893d7c Binary files /dev/null and b/utils/__pycache__/face_z_align_util.cpython-312.pyc differ diff --git a/utils/__pycache__/face_z_align_util.cpython-38.pyc b/utils/__pycache__/face_z_align_util.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..35ada68749be8c9dc675aacde6a084734b3b0b14 Binary files /dev/null and b/utils/__pycache__/face_z_align_util.cpython-38.pyc differ diff --git a/utils/__pycache__/face_z_align_util.cpython-39.pyc b/utils/__pycache__/face_z_align_util.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8dfdec049869dcdd2ae11cfbfeb1e3bd0108d28d Binary files /dev/null and b/utils/__pycache__/face_z_align_util.cpython-39.pyc differ diff 
--git a/utils/__pycache__/quat.cpython-312.pyc b/utils/__pycache__/quat.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d02322c357d413dbe2f7abf60b98f6f68cfa6731 Binary files /dev/null and b/utils/__pycache__/quat.cpython-312.pyc differ diff --git a/utils/__pycache__/quat.cpython-38.pyc b/utils/__pycache__/quat.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0124041c881b09059c1cc20f8247fd303782a564 Binary files /dev/null and b/utils/__pycache__/quat.cpython-38.pyc differ diff --git a/utils/__pycache__/quat.cpython-39.pyc b/utils/__pycache__/quat.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3f3fd68f0078606d2a3b66e2cc2127ce1e117f3e Binary files /dev/null and b/utils/__pycache__/quat.cpython-39.pyc differ diff --git a/utils/bvh.py b/utils/bvh.py new file mode 100644 index 0000000000000000000000000000000000000000..a36402d9999cbc29eff95c9e9dc2e5fcdee27bda --- /dev/null +++ b/utils/bvh.py @@ -0,0 +1,242 @@ +import re +import numpy as np + +channelmap = { + 'Xrotation': 'x', + 'Yrotation': 'y', + 'Zrotation': 'z' +} + +channelmap_inv = { + 'x': 'Xrotation', + 'y': 'Yrotation', + 'z': 'Zrotation', +} + +ordermap = { + 'x': 0, + 'y': 1, + 'z': 2, +} + +def load(filename: str, order: str = None) -> dict: + """Loads a BVH file. + + Args: + filename (str): Path to the BVH file. + order (str): The order of the rotation channels, e.g. "xyz". + + Returns: + dict: A dictionary containing the following keys: + * names (list)(jnum): The names of the joints. + * parents (list)(jnum): The parent indices. + * offsets (np.ndarray)(jnum, 3): The offsets of the joints. + * rotations (np.ndarray)(fnum, jnum, 3): The per-frame local joint rotations (Euler angles). + * positions (np.ndarray)(fnum, jnum, 3): The per-frame joint positions. + * order (str): The order of the channels. + * frametime (float): The time between two frames.
+ """ + + f = open(filename, "r") + + i = 0 + active = -1 + end_site = False + + # Create empty lists for saving parameters + names = [] + offsets = np.array([]).reshape((0, 3)) + parents = np.array([], dtype=int) + + # Parse the file, line by line + for line in f: + + if "HIERARCHY" in line: continue + if "MOTION" in line: continue + + rmatch = re.match(r"ROOT (\w+)", line) + if rmatch: + names.append(rmatch.group(1)) + offsets = np.append(offsets, np.array([[0, 0, 0]]), axis=0) + parents = np.append(parents, active) + active = (len(parents) - 1) + continue + + if "{" in line: continue + + if "}" in line: + if end_site: + end_site = False + else: + active = parents[active] + continue + + offmatch = re.match(r"\s*OFFSET\s+([\-\d\.e]+)\s+([\-\d\.e]+)\s+([\-\d\.e]+)", line) + if offmatch: + if not end_site: + offsets[active] = np.array([list(map(float, offmatch.groups()))]) + continue + + chanmatch = re.match(r"\s*CHANNELS\s+(\d+)", line) + if chanmatch: + channels = int(chanmatch.group(1)) + if order is None: + channelis = 0 if channels == 3 else 3 + channelie = 3 if channels == 3 else 6 + parts = line.split()[2 + channelis:2 + channelie] + if any([p not in channelmap for p in parts]): + continue + order = "".join([channelmap[p] for p in parts]) + continue + + jmatch = re.match("\s*JOINT\s+(\w+)", line) + if jmatch: + names.append(jmatch.group(1)) + offsets = np.append(offsets, np.array([[0, 0, 0]]), axis=0) + parents = np.append(parents, active) + active = (len(parents) - 1) + continue + + if "End Site" in line: + end_site = True + continue + + fmatch = re.match("\s*Frames:\s+(\d+)", line) + if fmatch: + fnum = int(fmatch.group(1)) + positions = offsets[None].repeat(fnum, axis=0) + rotations = np.zeros((fnum, len(offsets), 3)) + continue + + fmatch = re.match("\s*Frame Time:\s+([\d\.]+)", line) + if fmatch: + frametime = float(fmatch.group(1)) + continue + + dmatch = line.strip().split(' ') + if dmatch: + data_block = np.array(list(map(float, dmatch))) + N = len(parents) + fi = i + if channels == 3: + positions[fi, 0:1] = data_block[0:3] + rotations[fi, :] = data_block[3:].reshape(N, 3) + elif channels == 6: + data_block = data_block.reshape(N, 6) + positions[fi, :] = data_block[:, 0:3] + rotations[fi, :] = data_block[:, 3:6] + elif channels == 9: + positions[fi, 0] = data_block[0:3] + data_block = data_block[3:].reshape(N - 1, 9) + rotations[fi, 1:] = data_block[:, 3:6] + positions[fi, 1:] += data_block[:, 0:3] * data_block[:, 6:9] + else: + raise Exception("Too many channels! 
%i" % channels) + + i += 1 + + f.close() + + return { + 'rotations': rotations, + 'positions': positions, + 'offsets': offsets, + 'parents': parents, + 'names': names, + 'order': order, + 'frametime': frametime + } + + +def save_joint(f, data, t, i, save_order, order='zyx', save_positions=False): + + save_order.append(i) + + f.write("%sJOINT %s\n" % (t, data['names'][i])) + f.write("%s{\n" % t) + t += '\t' + + f.write("%sOFFSET %f %f %f\n" % (t, data['offsets'][i,0], data['offsets'][i,1], data['offsets'][i,2])) + + if save_positions: + f.write("%sCHANNELS 6 Xposition Yposition Zposition %s %s %s \n" % (t, + channelmap_inv[order[0]], channelmap_inv[order[1]], channelmap_inv[order[2]])) + else: + f.write("%sCHANNELS 3 %s %s %s\n" % (t, + channelmap_inv[order[0]], channelmap_inv[order[1]], channelmap_inv[order[2]])) + + end_site = True + + for j in range(len(data['parents'])): + if data['parents'][j] == i: + t = save_joint(f, data, t, j, save_order, order=order, save_positions=save_positions) + end_site = False + + if end_site: + f.write("%sEnd Site\n" % t) + f.write("%s{\n" % t) + t += '\t' + f.write("%sOFFSET %f %f %f\n" % (t, 0.0, 0.0, 0.0)) + t = t[:-1] + f.write("%s}\n" % t) + + t = t[:-1] + f.write("%s}\n" % t) + + return t + + +def save(filename, data, save_positions=False): + """ Save a joint hierarchy to a file. + + Args: + filename (str): The output will save on the bvh file. + data (dict): The data to save.(rotations, positions, offsets, parents, names, order, frametime) + save_positions (bool): Whether to save all of joint positions on MOTION. (False is recommended.) + """ + + order = data['order'] + frametime = data['frametime'] + + with open(filename, 'w') as f: + + t = "" + f.write("%sHIERARCHY\n" % t) + f.write("%sROOT %s\n" % (t, data['names'][0])) + f.write("%s{\n" % t) + t += '\t' + + f.write("%sOFFSET %f %f %f\n" % (t, data['offsets'][0,0], data['offsets'][0,1], data['offsets'][0,2]) ) + f.write("%sCHANNELS 6 Xposition Yposition Zposition %s %s %s \n" % + (t, channelmap_inv[order[0]], channelmap_inv[order[1]], channelmap_inv[order[2]])) + + save_order = [0] + + for i in range(len(data['parents'])): + if data['parents'][i] == 0: + t = save_joint(f, data, t, i, save_order, order=order, save_positions=save_positions) + + t = t[:-1] + f.write("%s}\n" % t) + + rots, poss = data['rotations'], data['positions'] + + f.write("MOTION\n") + f.write("Frames: %i\n" % len(rots)); + f.write("Frame Time: %f\n" % frametime); + + for i in range(rots.shape[0]): + for j in save_order: + + if save_positions or j == 0: + + f.write("%f %f %f %f %f %f " % ( + poss[i,j,0], poss[i,j,1], poss[i,j,2], + rots[i,j,0], rots[i,j,1], rots[i,j,2])) + + else: + + f.write("%f %f %f " % ( + rots[i,j,0], rots[i,j,1], rots[i,j,2])) + + f.write("\n") \ No newline at end of file diff --git a/utils/config.py b/utils/config.py new file mode 100644 index 0000000000000000000000000000000000000000..3096ac9ea41c53a9d419af933efc1ea865ae6bbb --- /dev/null +++ b/utils/config.py @@ -0,0 +1,19 @@ +import os + +SMPL_DATA_PATH = "./body_models/smpl" + +HUMAN_MODEL_PATH = './body_models/human_model_files' + +SMPL_KINTREE_PATH = os.path.join(SMPL_DATA_PATH, "kintree_table.pkl") +SMPL_MODEL_PATH = os.path.join(SMPL_DATA_PATH, "SMPL_NEUTRAL.pkl") +JOINT_REGRESSOR_TRAIN_EXTRA = os.path.join(SMPL_DATA_PATH, 'J_regressor_extra.npy') + +ROT_CONVENTION_TO_ROT_NUMBER = { + 'legacy': 23, + 'no_hands': 21, + 'full_hands': 51, + 'mitten_hands': 33, +} + +GENDERS = ['neutral', 'male', 'female'] +NUM_BETAS = 10 \ No newline at end of file 
diff --git a/utils/config_3.py b/utils/config_3.py new file mode 100644 index 0000000000000000000000000000000000000000..ffca34e0af4b38c4418f33621f3d2efbc969f7f6 --- /dev/null +++ b/utils/config_3.py @@ -0,0 +1,18 @@ +import os +import os.path as osp +import sys +import numpy as np + +class Config: + + flame_shape_params = 100 + flame_expression_params = 50 + face_corr_fname = '/cpfs01/user/xiaolixing/T2M-GPT/body_models/human_model_files/smplx/SMPL-X__FLAME_vertex_ids.npy' + + def set_additional_args(self, **kwargs): + names = self.__dict__ + for k, v in kwargs.items(): + names[k] = v + + +cfg = Config() \ No newline at end of file diff --git a/utils/eval_trans.py b/utils/eval_trans.py new file mode 100644 index 0000000000000000000000000000000000000000..b28e56da2e95337b5c2b28ef6a957d5cc84b1ff7 --- /dev/null +++ b/utils/eval_trans.py @@ -0,0 +1,434 @@ +import numpy as np +import torch +from scipy import linalg +from utils.face_z_align_util import rotation_6d_to_matrix +import visualization.plot_3d_global as plot_3d +import os + +def tensorborad_add_video_xyz(writer, xyz, nb_iter, tag, title_batch=None, outname=None, fps=30): + xyz = xyz[:1] + bs, seq = xyz.shape[:2] + xyz = xyz.reshape(bs, seq, -1, 3) + plot_xyz = plot_3d.draw_to_batch(xyz.cpu().numpy(),title_batch, outname) + plot_xyz = np.transpose(plot_xyz, (0, 1, 4, 2, 3)) + writer.add_video(tag, plot_xyz, nb_iter, fps = fps) + +def calculate_mpjpe(gt_joints, pred_joints): + assert gt_joints.shape == pred_joints.shape, f"GT shape: {gt_joints.shape}, pred shape: {pred_joints.shape}" + pelvis = gt_joints[:, [0]].mean(1) + gt_joints = gt_joints - torch.unsqueeze(pelvis, dim=1) + pelvis = pred_joints[:, [0]].mean(1) + pred_joints = pred_joints - torch.unsqueeze(pelvis, dim=1) + + mpjpe = torch.linalg.norm(pred_joints - gt_joints, dim=-1) + mpjpe_seq = mpjpe.mean(-1) + + return mpjpe_seq + + +def accumulate_rotations(relative_rotations): + R_total = [relative_rotations[0]] + for R_rel in relative_rotations[1:]: + R_total.append(np.matmul(R_rel, R_total[-1])) + return np.array(R_total) + +def recover_from_local_position(final_x, njoint): + + if final_x.ndim == 3: + bs, nfrm, _ = final_x.shape + is_batched = True + else: + nfrm, _ = final_x.shape + bs = 1 + is_batched = False + final_x = final_x.reshape(1, *final_x.shape) + + + positions_no_heading = final_x[:,:,8:8+3*njoint].reshape(bs, nfrm, njoint, 3) + velocities_root_xy_no_heading = final_x[:,:,:2] + global_heading_diff_rot = final_x[:,:,2:8] + + + positions_with_heading = [] + for b in range(bs): + + global_heading_rot = accumulate_rotations(rotation_6d_to_matrix(torch.from_numpy(global_heading_diff_rot[b])).numpy()) + inv_global_heading_rot = np.transpose(global_heading_rot, (0, 2, 1)) + + + curr_pos_with_heading = np.matmul(np.repeat(inv_global_heading_rot[:, None,:, :], njoint, axis=1), + positions_no_heading[b][...,None]).squeeze(-1) + + + velocities_root_xyz_no_heading = np.zeros((velocities_root_xy_no_heading[b].shape[0], 3)) + velocities_root_xyz_no_heading[:, 0] = velocities_root_xy_no_heading[b, :, 0] + velocities_root_xyz_no_heading[:, 2] = velocities_root_xy_no_heading[b, :, 1] + velocities_root_xyz_no_heading[1:, :] = np.matmul(inv_global_heading_rot[:-1], + velocities_root_xyz_no_heading[1:, :,None]).squeeze(-1) + + root_translation = np.cumsum(velocities_root_xyz_no_heading, axis=0) + + + curr_pos_with_heading[:, :, 0] += root_translation[:, 0:1] + curr_pos_with_heading[:, :, 2] += root_translation[:, 2:] + + positions_with_heading.append(curr_pos_with_heading) + + 
positions_with_heading = np.stack(positions_with_heading, axis=0) + + if not is_batched: + positions_with_heading = positions_with_heading.squeeze(0) + + return positions_with_heading + +@torch.no_grad() +def evaluation_gt(val_loader, evaluator, device=torch.device('cuda')): + textencoder, motionencoder = evaluator + motion_annotation_list = [] + R_precision_real = torch.tensor([0,0,0], device=device) + matching_score_real = torch.tensor(0.0, device=device) + nb_sample = torch.tensor(0, device=device) + + for batch in val_loader: + text, pose, m_length = batch + pose = pose.to(device).float() + et, em = textencoder(text).loc, motionencoder(pose, m_length).loc + motion_annotation_list.append(em) + temp_R, temp_match = calculate_R_precision(et.cpu().numpy(), em.cpu().numpy(), top_k=3, sum_all=True) + R_precision_real += torch.tensor(temp_R, device=device) + matching_score_real += torch.tensor(temp_match, device=device) + nb_sample += et.shape[0] + + motion_annotation_np = torch.cat(motion_annotation_list, dim=0).cpu().numpy() + + diversity_real = calculate_diversity(motion_annotation_np, 300 if nb_sample > 300 else 100) + + R_precision_real = R_precision_real / nb_sample + matching_score_real = matching_score_real / nb_sample + + # for GT data, no need to calculate fid + fid = 0.0 + + return fid, diversity_real, R_precision_real[0], R_precision_real[1], R_precision_real[2], matching_score_real + +# Single-GPU evaluation of Causal TAE (test time) +@torch.no_grad() +def evaluation_tae_single(out_dir, val_loader, net, logger, writer, evaluator, device=torch.device('cuda')): + net.eval() + nb_sample = 0 + + textencoder, motionencoder = evaluator + + motion_annotation_list = [] + motion_pred_list = [] + + nb_sample = torch.tensor(0, device=device) + mpjpe = torch.tensor(0.0, device=device) + num_poses = torch.tensor(0, device=device) + + for batch in val_loader: + motion, m_length = batch + motion = motion.to(device) + motion = motion.float() + bs, seq = motion.shape[0], motion.shape[1] + em = motionencoder(motion, m_length).loc + + num_joints = 22 + + pred_pose_eval = torch.zeros((bs, seq, motion.shape[-1])).to(device) + + for i in range(bs): + pose = val_loader.dataset.inv_transform(motion[i:i+1, :m_length[i], :].detach().cpu().numpy()) + pose_xyz = recover_from_local_position(pose.squeeze(0), num_joints) + pred_pose, _, _ = net(motion[i:i+1, :m_length[i]]) + + pred_pose_eval[i:i+1,:m_length[i],:] = pred_pose + + pred_denorm = val_loader.dataset.inv_transform(pred_pose.detach().cpu().numpy()) + + pred_xyz = recover_from_local_position(pred_denorm.squeeze(0), num_joints) + pred_xyz = torch.from_numpy(pred_xyz).float().to(device) + pose_xyz = torch.from_numpy(pose_xyz).float().to(device) + + mpjpe += torch.sum(calculate_mpjpe(pose_xyz[:, :m_length[i]].squeeze(), pred_xyz[:, :m_length[i]].squeeze())) + num_poses += pose_xyz.shape[0] + + em_pred = motionencoder(pred_pose_eval, m_length).loc + + motion_pred_list.append(em_pred) + motion_annotation_list.append(em) + + nb_sample += bs + + mpjpe = mpjpe / num_poses + mpjpe = mpjpe * 1000 # mm + + motion_annotation_np = torch.cat(motion_annotation_list, dim=0).cpu().numpy() + motion_pred_np = torch.cat(motion_pred_list, dim=0).cpu().numpy() + gt_mu, gt_cov = calculate_activation_statistics(motion_annotation_np) + mu, cov= calculate_activation_statistics(motion_pred_np) + + fid = calculate_frechet_distance(gt_mu, gt_cov, mu, cov) + + msg = f"--> \t Eva. :, FID. {fid:.4f}, mpjpe. 
{mpjpe:.5f} (mm)" + logger.info(msg) + + return fid, mpjpe, writer, logger + +# Multi-GPU evaluation of Causal TAE (training time) +@torch.no_grad() +def evaluation_tae_multi(out_dir, val_loader, net, logger, writer, nb_iter, best_iter, best_mpjpe, draw = True, save = True, savegif = True, device=torch.device('cuda'), accelerator=None): + net.eval() + nb_sample = 0 + + draw_org = [] + draw_pred = [] + draw_text = [] + + nb_sample = torch.tensor(0, device=device) + mpjpe = torch.tensor(0.0, device=device) + num_poses = torch.tensor(0, device=device) + + for batch in val_loader: + motion, m_length = batch + motion = motion.to(device) + bs, seq = motion.shape[0], motion.shape[1] + num_joints = 22 + pred_pose_eval = torch.zeros((bs, seq, motion.shape[-1])).to(device) + + for i in range(bs): + pose = val_loader.dataset.inv_transform(motion[i:i+1, :m_length[i], :].detach().cpu().numpy()) + pose_xyz = recover_from_local_position(pose.squeeze(0), num_joints) + + pred_pose, _, _ = net(motion[i:i+1, :m_length[i]]) + pred_pose_eval[i:i+1,:m_length[i],:] = pred_pose + + if accelerator is None or accelerator.is_main_process: + pred_denorm = val_loader.dataset.inv_transform(pred_pose.detach().cpu().numpy()) + pred_xyz = recover_from_local_position(pred_denorm.squeeze(0), num_joints) + pred_xyz = torch.from_numpy(pred_xyz).float().to(device) + pose_xyz = torch.from_numpy(pose_xyz).float().to(device) + mpjpe += torch.sum(calculate_mpjpe(pose_xyz[:, :m_length[i]].squeeze(), pred_xyz[:, :m_length[i]].squeeze())) + num_poses += pose_xyz.shape[0] + + if i < 4: + draw_org.append(pose_xyz) + draw_pred.append(pred_xyz) + draw_text.append('') + nb_sample += bs + + + if accelerator is not None: + accelerator.wait_for_everyone() + nb_sample = accelerator.reduce(nb_sample, reduction="sum") + mpjpe = accelerator.reduce(mpjpe, reduction="sum") + + if accelerator is None or accelerator.is_main_process: + mpjpe = mpjpe / num_poses + # transform mpjpe to mm + mpjpe = mpjpe * 1000 + msg = f"--> \t Eva. Iter {nb_iter} :, mpjpe. {mpjpe:.3f} (mm)" + logger.info(msg) + + # save visualization on tensorboard + if draw and (accelerator is None or accelerator.is_main_process): + writer.add_scalar('./Test/mpjpe', mpjpe, nb_iter) + + if nb_iter % 20000 == 0 : + for ii in range(4): + draw_org[ii] = draw_org[ii].unsqueeze(0) + tensorborad_add_video_xyz(writer, draw_org[ii], nb_iter, tag='./Vis/org_eval'+str(ii), title_batch=[draw_text[ii]], outname=[os.path.join(out_dir, 'gt'+str(ii)+'.gif')] if savegif else None, fps=30) + + if nb_iter % 20000 == 0 : + for ii in range(4): + draw_pred[ii] = draw_pred[ii].unsqueeze(0) + tensorborad_add_video_xyz(writer, draw_pred[ii], nb_iter, tag='./Vis/pred_eval'+str(ii), title_batch=[draw_text[ii]], outname=[os.path.join(out_dir, 'pred'+str(ii)+'.gif')] if savegif else None, fps=30) + + if accelerator is None or accelerator.is_main_process: + if mpjpe < best_mpjpe : + msg = f"--> --> \t mpjpe Improved from {best_mpjpe:.5f} to {mpjpe:.5f} !!!" 
+ logger.info(msg) + best_mpjpe = mpjpe + if save: + torch.save({'net' : net.state_dict()}, os.path.join(out_dir, 'net_best_mpjpe.pth')) + if save: + torch.save({'net' : net.state_dict()}, os.path.join(out_dir, 'net_last.pth')) + + net.train() + return best_iter, best_mpjpe, writer, logger + + +# Single-GPU evaluation of text to motion model (test time): +@torch.no_grad() +def evaluation_transformer_272_single(val_loader, net, trans, tokenize_model, logger, evaluator, cfg=4.0, device=torch.device('cuda'), unit_length=4): + textencoder, motionencoder = evaluator + trans.eval() + + draw_org = [] + draw_pred = [] + draw_text = [] + draw_text_pred = [] + + motion_annotation_list = [] + motion_pred_list = [] + R_precision_real = torch.tensor([0,0,0], device=device) + R_precision = torch.tensor([0,0,0], device=device) + matching_score_real = torch.tensor(0.0, device=device) + matching_score_pred = torch.tensor(0.0, device=device) + + nb_sample = torch.tensor(0, device=device) + + for batch in val_loader: + text, pose, m_length = batch + bs, seq = pose.shape[:2] + num_joints = 22 + pred_pose_eval = torch.zeros((bs, seq, pose.shape[-1])).to(device) + pred_len = torch.ones(bs).long() + + for k in range(bs): + index_motion = trans.sample_for_eval_CFG(text[k:k+1], length=m_length[k], tokenize_model=tokenize_model, device=device, unit_length=unit_length, cfg=cfg) + pred_pose = net.forward_decoder(index_motion) + cur_len = pred_pose.shape[1] + pred_len[k] = min(cur_len, seq) + pred_pose_eval[k:k+1, :cur_len] = pred_pose[:, :seq] + + et_pred, em_pred = textencoder(text).loc, motionencoder(pred_pose_eval, pred_len).loc + + pose = pose.to(device).float() + et, em = textencoder(text).loc, motionencoder(pose, m_length).loc + motion_annotation_list.append(em) + motion_pred_list.append(em_pred) + + temp_R, temp_match = calculate_R_precision(et.cpu().numpy(), em.cpu().numpy(), top_k=3, sum_all=True) + R_precision_real += torch.tensor(temp_R, device=device) + matching_score_real += torch.tensor(temp_match, device=device) + temp_R, temp_match = calculate_R_precision(et_pred.cpu().numpy(), em_pred.cpu().numpy(), top_k=3, sum_all=True) + R_precision += torch.tensor(temp_R, device=device) + matching_score_pred += torch.tensor(temp_match, device=device) + nb_sample += et.shape[0] + + pose = torch.tensor(pose).to(device) + + motion_annotation_np = torch.cat(motion_annotation_list, dim=0).cpu().numpy() + motion_pred_np = torch.cat(motion_pred_list, dim=0).cpu().numpy() + + gt_mu, gt_cov = calculate_activation_statistics(motion_annotation_np) + mu, cov = calculate_activation_statistics(motion_pred_np) + + diversity_real = calculate_diversity(motion_annotation_np, 300 if nb_sample > 300 else 100) + diversity = calculate_diversity(motion_pred_np, 300 if nb_sample > 300 else 100) + + R_precision_real = R_precision_real / nb_sample + R_precision = R_precision / nb_sample + matching_score_real = matching_score_real / nb_sample + matching_score_pred = matching_score_pred / nb_sample + fid = calculate_frechet_distance(gt_mu, gt_cov, mu, cov) + + msg = f"--> \t Eval. :, FID. {fid:.4f}, Diversity Real. {diversity_real:.4f}, Diversity Pred. {diversity:.4f}, R_precision Real. {R_precision_real}, R_precision Pred. {R_precision}, MM-dist (matching_score) Real. {matching_score_real}, MM-dist (matching_score) Pred. 
{matching_score_pred}" + logger.info(msg) + + return fid, diversity, R_precision[0], R_precision[1], R_precision[2], matching_score_pred, logger + +def euclidean_distance_matrix(matrix1, matrix2): + assert matrix1.shape[1] == matrix2.shape[1] + d1 = -2 * np.dot(matrix1, matrix2.T) + d2 = np.sum(np.square(matrix1), axis=1, keepdims=True) + d3 = np.sum(np.square(matrix2), axis=1) + dists = np.sqrt(d1 + d2 + d3) + return dists + + + +def calculate_top_k(mat, top_k): + size = mat.shape[0] + gt_mat = np.expand_dims(np.arange(size), 1).repeat(size, 1) + bool_mat = (mat == gt_mat) + correct_vec = False + top_k_list = [] + for i in range(top_k): + correct_vec = (correct_vec | bool_mat[:, i]) + top_k_list.append(correct_vec[:, None]) + top_k_mat = np.concatenate(top_k_list, axis=1) + return top_k_mat + + +def calculate_R_precision(embedding1, embedding2, top_k, sum_all=False): + dist_mat = euclidean_distance_matrix(embedding1, embedding2) + matching_score = dist_mat.trace() + argmax = np.argsort(dist_mat, axis=1) + top_k_mat = calculate_top_k(argmax, top_k) + if sum_all: + return top_k_mat.sum(axis=0), matching_score + else: + return top_k_mat, matching_score + + + +def calculate_diversity(activation, diversity_times): + assert len(activation.shape) == 2 + assert activation.shape[0] > diversity_times + num_samples = activation.shape[0] + + first_indices = np.random.choice(num_samples, diversity_times, replace=False) + second_indices = np.random.choice(num_samples, diversity_times, replace=False) + dist = linalg.norm(activation[first_indices] - activation[second_indices], axis=1) + return dist.mean() + + + +def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6): + mu1 = np.atleast_1d(mu1) + mu2 = np.atleast_1d(mu2) + + sigma1 = np.atleast_2d(sigma1) + sigma2 = np.atleast_2d(sigma2) + + assert mu1.shape == mu2.shape, \ + 'Training and test mean vectors have different lengths' + assert sigma1.shape == sigma2.shape, \ + 'Training and test covariances have different dimensions' + + diff = mu1 - mu2 + + covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False) + if not np.isfinite(covmean).all(): + msg = ('fid calculation produces singular product; ' + 'adding %s to diagonal of cov estimates') % eps + print(msg) + offset = np.eye(sigma1.shape[0]) * eps + covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset)) + + if np.iscomplexobj(covmean): + if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3): + m = np.max(np.abs(covmean.imag)) + raise ValueError('Imaginary component {}'.format(m)) + covmean = covmean.real + + tr_covmean = np.trace(covmean) + + return (diff.dot(diff) + np.trace(sigma1) + + np.trace(sigma2) - 2 * tr_covmean) + + +def calculate_activation_statistics(activations): + mu = np.mean(activations, axis=0) + cov = np.cov(activations, rowvar=False) + return mu, cov + + +def calculate_frechet_feature_distance(feature_list1, feature_list2): + feature_list1 = np.stack(feature_list1) + feature_list2 = np.stack(feature_list2) + + mean = np.mean(feature_list1, axis=0) + std = np.std(feature_list1, axis=0) + 1e-10 + feature_list1 = (feature_list1 - mean) / std + feature_list2 = (feature_list2 - mean) / std + + dist = calculate_frechet_distance( + mu1=np.mean(feature_list1, axis=0), + sigma1=np.cov(feature_list1, rowvar=False), + mu2=np.mean(feature_list2, axis=0), + sigma2=np.cov(feature_list2, rowvar=False), + ) + return dist diff --git a/utils/face_z_align_util.py b/utils/face_z_align_util.py new file mode 100644 index 
0000000000000000000000000000000000000000..55ec5cdfedc99b9fc5b8709414642a047c430bf8 --- /dev/null +++ b/utils/face_z_align_util.py @@ -0,0 +1,1017 @@ +import torch +import numpy as np + +_EPS4 = np.finfo(float).eps * 4.0 + +_FLOAT_EPS = np.finfo(float).eps + +def qinv(q): + assert q.shape[-1] == 4, 'q must be a tensor of shape (*, 4)' + mask = torch.ones_like(q) + mask[..., 1:] = -mask[..., 1:] + return q * mask + + +def qinv_np(q): + assert q.shape[-1] == 4, 'q must be a tensor of shape (*, 4)' + return qinv(torch.from_numpy(q).float()).numpy() + + +def qnormalize(q): + assert q.shape[-1] == 4, 'q must be a tensor of shape (*, 4)' + return q / torch.norm(q, dim=-1, keepdim=True) + + +def qmul(q, r): + ''' + Multiply quaternion(s) q with quaternion(s) r. + Expects two equally-sized tensors of shape (*, 4), where * denotes any number of dimensions. + Returns q*r as a tensor of shape (*, 4). + ''' + assert q.shape[-1] == 4 + assert r.shape[-1] == 4 + original_shape = q.shape + # Compute outer product + terms = torch.bmm(r.view(-1, 4, 1), q.view(-1, 1, 4)) + w = terms[:, 0, 0] - terms[:, 1, 1] - terms[:, 2, 2] - terms[:, 3, 3] + x = terms[:, 0, 1] + terms[:, 1, 0] - terms[:, 2, 3] + terms[:, 3, 2] + y = terms[:, 0, 2] + terms[:, 1, 3] + terms[:, 2, 0] - terms[:, 3, 1] + z = terms[:, 0, 3] - terms[:, 1, 2] + terms[:, 2, 1] + terms[:, 3, 0] + return torch.stack((w, x, y, z), dim=1).view(original_shape) + + +def qrot(q, v): + ''' + Rotate vector(s) v about the rotation described by quaternion(s) q. + Expects a tensor of shape (*, 4) for q and a tensor of shape (*, 3) for v, + where * denotes any number of dimensions. + Returns a tensor of shape (*, 3). + ''' + assert q.shape[-1] == 4 + assert v.shape[-1] == 3 + assert q.shape[:-1] == v.shape[:-1] + + original_shape = list(v.shape) + q = q.contiguous().view(-1, 4) + v = v.contiguous().view(-1, 3) + + qvec = q[:, 1:] + uv = torch.cross(qvec, v, dim=1) + uuv = torch.cross(qvec, uv, dim=1) + return (v + 2 * (q[:, :1] * uv + uuv)).view(original_shape) + + +def qeuler(q, order, epsilon=0, deg=True): + """ + Convert quaternion(s) q to Euler angles. + Expects a tensor of shape (*, 4), where * denotes any number of dimensions. + Returns a tensor of shape (*, 3). 
+ """ + assert q.shape[-1] == 4 + + original_shape = list(q.shape) + original_shape[-1] = 3 + q = q.view(-1, 4) + + q0 = q[:, 0] + q1 = q[:, 1] + q2 = q[:, 2] + q3 = q[:, 3] + + if order == 'xyz': + x = torch.atan2(2 * (q0 * q1 - q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2)) + y = torch.asin(torch.clamp(2 * (q1 * q3 + q0 * q2), -1 + epsilon, 1 - epsilon)) + z = torch.atan2(2 * (q0 * q3 - q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3)) + elif order == 'yzx': + x = torch.atan2(2 * (q0 * q1 - q2 * q3), 1 - 2 * (q1 * q1 + q3 * q3)) + y = torch.atan2(2 * (q0 * q2 - q1 * q3), 1 - 2 * (q2 * q2 + q3 * q3)) + z = torch.asin(torch.clamp(2 * (q1 * q2 + q0 * q3), -1 + epsilon, 1 - epsilon)) + elif order == 'zxy': + x = torch.asin(torch.clamp(2 * (q0 * q1 + q2 * q3), -1 + epsilon, 1 - epsilon)) + y = torch.atan2(2 * (q0 * q2 - q1 * q3), 1 - 2 * (q1 * q1 + q2 * q2)) + z = torch.atan2(2 * (q0 * q3 - q1 * q2), 1 - 2 * (q1 * q1 + q3 * q3)) + elif order == 'xzy': + x = torch.atan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q3 * q3)) + y = torch.atan2(2 * (q0 * q2 + q1 * q3), 1 - 2 * (q2 * q2 + q3 * q3)) + z = torch.asin(torch.clamp(2 * (q0 * q3 - q1 * q2), -1 + epsilon, 1 - epsilon)) + elif order == 'yxz': + x = torch.asin(torch.clamp(2 * (q0 * q1 - q2 * q3), -1 + epsilon, 1 - epsilon)) + y = torch.atan2(2 * (q1 * q3 + q0 * q2), 1 - 2 * (q1 * q1 + q2 * q2)) + z = torch.atan2(2 * (q1 * q2 + q0 * q3), 1 - 2 * (q1 * q1 + q3 * q3)) + elif order == 'zyx': + x = torch.atan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2)) + y = torch.asin(torch.clamp(2 * (q0 * q2 - q1 * q3), -1 + epsilon, 1 - epsilon)) + z = torch.atan2(2 * (q0 * q3 + q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3)) + else: + raise ValueError(f"Invalid order: {order}") + + if deg: + return torch.stack((x, y, z), dim=1).view(original_shape) * 180 / np.pi + else: + return torch.stack((x, y, z), dim=1).view(original_shape) + + + +def qmul_np(q, r): + q = torch.from_numpy(q).contiguous().float() + r = torch.from_numpy(r).contiguous().float() + return qmul(q, r).numpy() + + +def qrot_np(q, v): + q = torch.from_numpy(q).contiguous().float() + v = torch.from_numpy(v).contiguous().float() + return qrot(q, v).numpy() + + +def qeuler_np(q, order, epsilon=0, use_gpu=False): + if use_gpu: + q = torch.from_numpy(q).cuda().float() + return qeuler(q, order, epsilon).cpu().numpy() + else: + q = torch.from_numpy(q).contiguous().float() + return qeuler(q, order, epsilon).numpy() + + +def qfix(q): + ''' + Enforce quaternion continuity across the time dimension by selecting + the representation (q or -q) with minimal distance (or, equivalently, maximal dot product) + between two consecutive frames. + ''' + assert len(q.shape) == 3 + assert q.shape[-1] == 4 + + result = q.copy() + dot_products = np.sum(q[1:] * q[:-1], axis=2) + mask = dot_products < 0 + mask = (np.cumsum(mask, axis=0) % 2).astype(bool) + result[1:][mask] *= -1 + return result + + +def euler2quat(e, order, deg=True): + ''' + Convert Euler angles to quaternions. + ''' + assert e.shape[-1] == 3 + original_shape = list(e.shape) + original_shape[-1] = 4 + e = e.view(-1, 3) + if deg: + e = e * np.pi / 180. 
+ + x = e[:, 0] + y = e[:, 1] + z = e[:, 2] + rx = torch.stack((torch.cos(x / 2), torch.sin(x / 2), torch.zeros_like(x), torch.zeros_like(x)), dim=1) + ry = torch.stack((torch.cos(y / 2), torch.zeros_like(y), torch.sin(y / 2), torch.zeros_like(y)), dim=1) + rz = torch.stack((torch.cos(z / 2), torch.zeros_like(z), torch.zeros_like(z), torch.sin(z / 2)), dim=1) + result = None + for coord in order: + if coord == 'x': + r = rx + elif coord == 'y': + r = ry + elif coord == 'z': + r = rz + else: + raise ValueError(f"Invalid order: {order}") + if result is None: + result = r + else: + result = qmul(result, r) + + if order in ['xyz', 'yzx', 'zxy']: + result *= -1 + + return result.view(original_shape) + + +def expmap_to_quaternion(e): + ''' + Convert axis-angle rotations (aka exponential maps) to quaternions. + Stable formula from "Practical Parameterization of Rotations Using the Exponential Map". + Expects a tensor of shape (*, 3), where * denotes any number of dimensions. + Returns a tensor of shape (*, 4). + ''' + assert e.shape[-1] == 3 + + original_shape = list(e.shape) + original_shape[-1] = 4 + e = e.reshape(-1, 3) + theta = np.linalg.norm(e, axis=1).reshape(-1, 1) + w = np.cos(0.5 * theta).reshape(-1, 1) + xyz = 0.5 * np.sinc(0.5 * theta / np.pi) * e + return np.concatenate((w, xyz), axis=1).reshape(original_shape) + + +def euler_to_quaternion(e, order): + ''' + Convert Euler angles to quaternions. + ''' + assert e.shape[-1] == 3 + + original_shape = list(e.shape) + original_shape[-1] = 4 + e = e.reshape(-1, 3) + x = e[:, 0] + y = e[:, 1] + z = e[:, 2] + rx = np.stack((np.cos(x / 2), np.sin(x / 2), np.zeros_like(x), np.zeros_like(x)), axis=1) + ry = np.stack((np.cos(y / 2), np.zeros_like(y), np.sin(y / 2), np.zeros_like(y)), axis=1) + rz = np.stack((np.cos(z / 2), np.zeros_like(z), np.zeros_like(z), np.sin(z / 2)), axis=1) + result = None + for coord in order: + if coord == 'x': + r = rx + elif coord == 'y': + r = ry + elif coord == 'z': + r = rz + else: + raise ValueError(f"Invalid order: {order}") + if result is None: + result = r + else: + result = qmul_np(result, r) + + if order in ['xyz', 'yzx', 'zxy']: + result *= -1 + + return result.reshape(original_shape) + + +def quaternion_to_matrix(quaternions): + ''' + Convert rotations given as quaternions to rotation matrices. + Args: + quaternions: quaternions with real part first, + as tensor of shape (..., 4). + Returns: + Rotation matrices as tensor of shape (..., 3, 3). 
+ ''' + r, i, j, k = torch.unbind(quaternions, -1) + two_s = 2.0 / (quaternions * quaternions).sum(-1) + + o = torch.stack( + ( + 1 - two_s * (j * j + k * k), + two_s * (i * j - k * r), + two_s * (i * k + j * r), + two_s * (i * j + k * r), + 1 - two_s * (i * i + k * k), + two_s * (j * k - i * r), + two_s * (i * k - j * r), + two_s * (j * k + i * r), + 1 - two_s * (i * i + j * j), + ), + -1, + ) + return o.reshape(quaternions.shape[:-1] + (3, 3)) + + +def quaternion_to_matrix_np(quaternions): + q = torch.from_numpy(quaternions).contiguous().float() + return quaternion_to_matrix(q).numpy() + + +def quaternion_to_cont6d_np(quaternions): + rotation_mat = quaternion_to_matrix_np(quaternions) + cont_6d = np.concatenate([rotation_mat[..., 0], rotation_mat[..., 1]], axis=-1) + return cont_6d + + +def quaternion_to_cont6d(quaternions): + rotation_mat = quaternion_to_matrix(quaternions) + cont_6d = torch.cat([rotation_mat[..., 0], rotation_mat[..., 1]], dim=-1) + return cont_6d + + +def rotation_6d_to_matrix(d6: torch.Tensor) -> torch.Tensor: + ''' + Converts 6D rotation representation by Zhou et al. [1] to rotation matrix + using Gram--Schmidt orthogonalization per Section B of [1]. + Args: + d6: 6D rotation representation, of size (*, 6) + + Returns: + batch of rotation matrices of size (*, 3, 3) + + [1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H. + On the Continuity of Rotation Representations in Neural Networks. + IEEE Conference on Computer Vision and Pattern Recognition, 2019. + Retrieved from http://arxiv.org/abs/1812.07035 + ''' + + a1, a2 = d6[..., :3], d6[..., 3:] + b1 = F.normalize(a1, dim=-1) + b2 = a2 - (b1 * a2).sum(-1, keepdim=True) * b1 + b2 = F.normalize(b2, dim=-1) + b3 = torch.cross(b1, b2, dim=-1) + return torch.stack((b1, b2, b3), dim=-2) + + + +def qpow(q0, t, dtype=torch.float): + ''' + q0 : tensor of quaternions + t: tensor of powers + ''' + q0 = qnormalize(q0) + theta0 = torch.acos(q0[..., 0]) + + ## if theta0 is close to zero, add epsilon to avoid NaNs + mask = (theta0 <= 10e-10) * (theta0 >= -10e-10) + theta0 = (1 - mask) * theta0 + mask * 10e-10 + v0 = q0[..., 1:] / torch.sin(theta0).view(-1, 1) + + if isinstance(t, torch.Tensor): + q = torch.zeros(t.shape + q0.shape) + theta = t.view(-1, 1) * theta0.view(1, -1) + else: # if t is a number + q = torch.zeros(q0.shape) + theta = t * theta0 + + q[..., 0] = torch.cos(theta) + q[..., 1:] = v0 * torch.sin(theta).unsqueeze(-1) + + return q.to(dtype) + + +def qslerp(q0, q1, t): + ''' + q0: starting quaternion + q1: ending quaternion + t: array of points along the way + + Returns: + Tensor of Slerps: t.shape + q0.shape + ''' + + q0 = qnormalize(q0) + q1 = qnormalize(q1) + q_ = qpow(qmul(q1, qinv(q0)), t) + + return qmul(q_, + q0.contiguous().view(torch.Size([1] * len(t.shape)) + q0.shape).expand(t.shape + q0.shape).contiguous()) + + +def qbetween(v0, v1): + ''' + find the quaternion used to rotate v0 to v1 + ''' + assert v0.shape[-1] == 3, 'v0 must be of the shape (*, 3)' + assert v1.shape[-1] == 3, 'v1 must be of the shape (*, 3)' + + v = torch.cross(v0, v1) + w = torch.sqrt((v0 ** 2).sum(dim=-1, keepdim=True) * (v1 ** 2).sum(dim=-1, keepdim=True)) + (v0 * v1).sum(dim=-1, + keepdim=True) + return qnormalize(torch.cat([w, v], dim=-1)) + + +def qbetween_np(v0, v1): + ''' + find the quaternion used to rotate v0 to v1 + ''' + assert v0.shape[-1] == 3, 'v0 must be of the shape (*, 3)' + assert v1.shape[-1] == 3, 'v1 must be of the shape (*, 3)' + + v0 = torch.from_numpy(v0).float() + v1 = torch.from_numpy(v1).float() + return 
qbetween(v0, v1).numpy() + + +def lerp(p0, p1, t): + if not isinstance(t, torch.Tensor): + t = torch.Tensor([t]) + + new_shape = t.shape + p0.shape + new_view_t = t.shape + torch.Size([1] * len(p0.shape)) + new_view_p = torch.Size([1] * len(t.shape)) + p0.shape + p0 = p0.view(new_view_p).expand(new_shape) + p1 = p1.view(new_view_p).expand(new_shape) + t = t.view(new_view_t).expand(new_shape) + + return p0 + t * (p1 - p0) + +joint_idx = (0,1,2,4,5,7,8,12,16,17,18,19,20,21,60,61,62,63,64,65,59,58,57,56,55, # body joints + 37,38,39,66,25,26,27,67,28,29,30,68,34,35,36,69,31,32,33,70, # left hand joints + 52,53,54,71,40,41,42,72,43,44,45,73,49,50,51,74,46,47,48,75, # right hand joints + 22,15, # jaw, head + 57,56, # eyeballs + 76,77,78,79,80,81,82,83,84,85, # eyebrow + 86,87,88,89, # nose + 90,91,92,93,94, # below nose + 95,96,97,98,99,100,101,102,103,104,105,106, # eyes + 107, # right mouth + 108,109,110,111,112, # upper mouth + 113, # left mouth + 114,115,116,117,118, # lower mouth + 119, # right lip + 120,121,122, # upper lip + 123, # left lip + 124,125,126, # lower lip + 127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143 # face contour + ) + +def face_z_transform(positions, global_orient, trans): + ''' + positions: [num_frame, num_joints, 3] + global_orient: [num_frame, 3] + ''' + joints_name = \ + ('Pelvis', 'L_Hip', 'R_Hip', 'L_Knee', 'R_Knee', 'L_Ankle', 'R_Ankle', 'Neck', 'L_Shoulder', 'R_Shoulder', + 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', 'L_Big_toe', 'L_Small_toe', 'L_Heel', 'R_Big_toe', 'R_Small_toe', + 'R_Heel', 'L_Ear', 'R_Ear', 'L_Eye', 'R_Eye', 'Nose', # body joints + 'L_Thumb_1', 'L_Thumb_2', 'L_Thumb_3', 'L_Thumb_4', 'L_Index_1', 'L_Index_2', 'L_Index_3', 'L_Index_4', + 'L_Middle_1', 'L_Middle_2', 'L_Middle_3', 'L_Middle_4', 'L_Ring_1', 'L_Ring_2', 'L_Ring_3', 'L_Ring_4', + 'L_Pinky_1', 'L_Pinky_2', 'L_Pinky_3', 'L_Pinky_4', # left hand joints + 'R_Thumb_1', 'R_Thumb_2', 'R_Thumb_3', 'R_Thumb_4', 'R_Index_1', 'R_Index_2', 'R_Index_3', 'R_Index_4', + 'R_Middle_1', 'R_Middle_2', 'R_Middle_3', 'R_Middle_4', 'R_Ring_1', 'R_Ring_2', 'R_Ring_3', 'R_Ring_4', + 'R_Pinky_1', 'R_Pinky_2', 'R_Pinky_3', 'R_Pinky_4', # right hand joints + *['Face_' + str(i) for i in range(1, 73)] + ) + root_pos_init = positions[0] + + assert root_pos_init.shape[0]==len(joints_name) + '''All initially face Z+''' + r_hip, l_hip, sdr_r, sdr_l = joints_name.index('R_Hip'), joints_name.index('L_Hip'), joints_name.index('R_Shoulder'), joints_name.index('L_Shoulder') + across1 = root_pos_init[r_hip] - root_pos_init[l_hip] + across2 = root_pos_init[sdr_r] - root_pos_init[sdr_l] + across = across1 + across2 + across = across / np.sqrt((across ** 2).sum(axis=-1))[..., np.newaxis] + + forward_init = np.cross(np.array([[0, 1, 0]]), across, axis=-1) + forward_init = forward_init / np.sqrt((forward_init ** 2).sum(axis=-1))[..., np.newaxis] + + target = np.array([[0, 0, 1]]) + root_quat_init = qbetween_np(forward_init, target) + root_quat_init = np.ones(global_orient.shape[:-1] + (4,)) * root_quat_init + root_quat_init = torch.tensor(root_quat_init, dtype=torch.float32).float().cuda() + + root_matrix_init = quaternion_to_matrix(root_quat_init) + global_orient_matrix = axis_angle_to_matrix(global_orient) + global_orient_matrix = torch.matmul(root_matrix_init, global_orient_matrix) + global_orient = matrix_to_axis_angle(global_orient_matrix) + + trans = trans.cpu().numpy() + '''Put on Floor''' + floor_height = positions.min(axis=0).min(axis=0)[1] + trans[:, 1] -= floor_height + + '''XZ at origin''' + 
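# Illustrative aside (a sketch, not part of this file): the step below subtracts
# the first frame's root XZ so the sequence starts above the origin. The Z+
# facing computed above relies on qbetween_np(), which returns the quaternion
# rotating one vector onto another, e.g.:
#   qbetween_np(np.array([[1., 0., 0.]]), np.array([[0., 0., 1.]]))
#   # ~[[0.7071, 0., -0.7071, 0.]], a 90-degree rotation about -Y taking X+ to Z+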
root_pos_init = positions[0] + root_pose_init_xz = root_pos_init[0] * np.array([1, 0, 1]) + trans = trans - root_pose_init_xz + + '''All initially face Z+''' + trans = torch.from_numpy(trans).float().cuda() + trans = qrot(root_quat_init, trans) + + return global_orient, trans + + +import functools +from typing import Optional + +import torch +import torch.nn.functional as F + +''' +The transformation matrices returned from the functions in this file assume +the points on which the transformation will be applied are column vectors. +i.e. the R matrix is structured as + R = [ + [Rxx, Rxy, Rxz], + [Ryx, Ryy, Ryz], + [Rzx, Rzy, Rzz], + ] # (3, 3) +This matrix can be applied to column vectors by post multiplication +by the points e.g. + points = [[0], [1], [2]] # (3 x 1) xyz coordinates of a point + transformed_points = R * points +To apply the same matrix to points which are row vectors, the R matrix +can be transposed and pre multiplied by the points: +e.g. + points = [[0, 1, 2]] # (1 x 3) xyz coordinates of a point + transformed_points = points * R.transpose(1, 0) +''' + + +def quaternion_to_matrix(quaternions): + ''' + Convert rotations given as quaternions to rotation matrices. + Args: + quaternions: quaternions with real part first, + as tensor of shape (..., 4). + Returns: + Rotation matrices as tensor of shape (..., 3, 3). + ''' + r, i, j, k = torch.unbind(quaternions, -1) + two_s = 2.0 / (quaternions * quaternions).sum(-1) + + o = torch.stack( + ( + 1 - two_s * (j * j + k * k), + two_s * (i * j - k * r), + two_s * (i * k + j * r), + two_s * (i * j + k * r), + 1 - two_s * (i * i + k * k), + two_s * (j * k - i * r), + two_s * (i * k - j * r), + two_s * (j * k + i * r), + 1 - two_s * (i * i + j * j), + ), + -1, + ) + return o.reshape(quaternions.shape[:-1] + (3, 3)) + + +def _copysign(a, b): + ''' + Return a tensor where each element has the absolute value taken from the, + corresponding element of a, with sign taken from the corresponding + element of b. This is like the standard copysign floating-point operation, + but is not careful about negative 0 and NaN. + Args: + a: source tensor. + b: tensor whose signs will be used, of the same shape as a. + Returns: + Tensor of the same shape as a with the signs of b. + ''' + signs_differ = (a < 0) != (b < 0) + return torch.where(signs_differ, -a, a) + + +def _sqrt_positive_part(x): + ''' + Returns torch.sqrt(torch.max(0, x)) + but with a zero subgradient where x is 0. + ''' + ret = torch.zeros_like(x) + positive_mask = x > 0 + ret[positive_mask] = torch.sqrt(x[positive_mask]) + return ret + + +def matrix_to_quaternion(matrix): + ''' + Convert rotations given as rotation matrices to quaternions. + Args: + matrix: Rotation matrices as tensor of shape (..., 3, 3). + Returns: + quaternions with real part first, as tensor of shape (..., 4). 
+ ''' + if matrix.size(-1) != 3 or matrix.size(-2) != 3: + raise ValueError(f"Invalid rotation matrix shape {matrix.shape}.") + m00 = matrix[..., 0, 0] + m11 = matrix[..., 1, 1] + m22 = matrix[..., 2, 2] + o0 = 0.5 * _sqrt_positive_part(1 + m00 + m11 + m22) + x = 0.5 * _sqrt_positive_part(1 + m00 - m11 - m22) + y = 0.5 * _sqrt_positive_part(1 - m00 + m11 - m22) + z = 0.5 * _sqrt_positive_part(1 - m00 - m11 + m22) + o1 = _copysign(x, matrix[..., 2, 1] - matrix[..., 1, 2]) + o2 = _copysign(y, matrix[..., 0, 2] - matrix[..., 2, 0]) + o3 = _copysign(z, matrix[..., 1, 0] - matrix[..., 0, 1]) + return torch.stack((o0, o1, o2, o3), -1) + + +def _axis_angle_rotation(axis: str, angle): + ''' + Return the rotation matrices for one of the rotations about an axis + of which Euler angles describe, for each value of the angle given. + Args: + axis: Axis label "X", "Y", or "Z". + angle: any shape tensor of Euler angles in radians + Returns: + Rotation matrices as tensor of shape (..., 3, 3). + ''' + + cos = torch.cos(angle) + sin = torch.sin(angle) + one = torch.ones_like(angle) + zero = torch.zeros_like(angle) + + if axis == "X": + R_flat = (one, zero, zero, zero, cos, -sin, zero, sin, cos) + elif axis == "Y": + R_flat = (cos, zero, sin, zero, one, zero, -sin, zero, cos) + elif axis == "Z": + R_flat = (cos, -sin, zero, sin, cos, zero, zero, zero, one) + else: + # guard against silent fall-through on an invalid axis label + raise ValueError("Axis must be one of 'X', 'Y', 'Z'.") + + return torch.stack(R_flat, -1).reshape(angle.shape + (3, 3)) + + +def euler_angles_to_matrix(euler_angles, convention: str): + ''' + Convert rotations given as Euler angles in radians to rotation matrices. + Args: + euler_angles: Euler angles in radians as tensor of shape (..., 3). + convention: Convention string of three uppercase letters from + {"X", "Y", and "Z"}. + Returns: + Rotation matrices as tensor of shape (..., 3, 3). + ''' + if euler_angles.dim() == 0 or euler_angles.shape[-1] != 3: + raise ValueError("Invalid input euler angles.") + if len(convention) != 3: + raise ValueError("Convention must have 3 letters.") + if convention[1] in (convention[0], convention[2]): + raise ValueError(f"Invalid convention {convention}.") + for letter in convention: + if letter not in ("X", "Y", "Z"): + raise ValueError(f"Invalid letter {letter} in convention string.") + matrices = map(_axis_angle_rotation, convention, torch.unbind(euler_angles, -1)) + return functools.reduce(torch.matmul, matrices) + + +def _angle_from_tan( + axis: str, other_axis: str, data, horizontal: bool, tait_bryan: bool +): + ''' + Extract the first or third Euler angle from the two members of + the matrix which are positive constant times its sine and cosine. + Args: + axis: Axis label "X", "Y", or "Z" for the angle we are finding. + other_axis: Axis label "X", "Y", or "Z" for the middle axis in the + convention. + data: Rotation matrices as tensor of shape (..., 3, 3). + horizontal: Whether we are looking for the angle for the third axis, + which means the relevant entries are in the same row of the + rotation matrix. If not, they are in the same column. + tait_bryan: Whether the first and third axes in the convention differ. + Returns: + Euler angles in radians for each matrix in data as a tensor + of shape (...).
+ ''' + + i1, i2 = {"X": (2, 1), "Y": (0, 2), "Z": (1, 0)}[axis] + if horizontal: + i2, i1 = i1, i2 + even = (axis + other_axis) in ["XY", "YZ", "ZX"] + if horizontal == even: + return torch.atan2(data[..., i1], data[..., i2]) + if tait_bryan: + return torch.atan2(-data[..., i2], data[..., i1]) + return torch.atan2(data[..., i2], -data[..., i1]) + + +def _index_from_letter(letter: str): + if letter == "X": + return 0 + if letter == "Y": + return 1 + if letter == "Z": + return 2 + + +def matrix_to_euler_angles(matrix, convention: str): + ''' + Convert rotations given as rotation matrices to Euler angles in radians. + Args: + matrix: Rotation matrices as tensor of shape (..., 3, 3). + convention: Convention string of three uppercase letters. + Returns: + Euler angles in radians as tensor of shape (..., 3). + ''' + if len(convention) != 3: + raise ValueError("Convention must have 3 letters.") + if convention[1] in (convention[0], convention[2]): + raise ValueError(f"Invalid convention {convention}.") + for letter in convention: + if letter not in ("X", "Y", "Z"): + raise ValueError(f"Invalid letter {letter} in convention string.") + if matrix.size(-1) != 3 or matrix.size(-2) != 3: + raise ValueError(f"Invalid rotation matrix shape f{matrix.shape}.") + i0 = _index_from_letter(convention[0]) + i2 = _index_from_letter(convention[2]) + tait_bryan = i0 != i2 + if tait_bryan: + central_angle = torch.asin( + matrix[..., i0, i2] * (-1.0 if i0 - i2 in [-1, 2] else 1.0) + ) + else: + central_angle = torch.acos(matrix[..., i0, i0]) + + o = ( + _angle_from_tan( + convention[0], convention[1], matrix[..., i2], False, tait_bryan + ), + central_angle, + _angle_from_tan( + convention[2], convention[1], matrix[..., i0, :], True, tait_bryan + ), + ) + return torch.stack(o, -1) + + +def random_quaternions( + n: int, dtype: Optional[torch.dtype] = None, device=None, requires_grad=False +): + ''' + Generate random quaternions representing rotations, + i.e. versors with nonnegative real part. + Args: + n: Number of quaternions in a batch to return. + dtype: Type to return. + device: Desired device of returned tensor. Default: + uses the current device for the default tensor type. + requires_grad: Whether the resulting tensor should have the gradient + flag set. + Returns: + Quaternions as tensor of shape (N, 4). + ''' + o = torch.randn((n, 4), dtype=dtype, device=device, requires_grad=requires_grad) + s = (o * o).sum(1) + o = o / _copysign(torch.sqrt(s), o[:, 0])[:, None] + return o + + +def random_rotations( + n: int, dtype: Optional[torch.dtype] = None, device=None, requires_grad=False +): + ''' + Generate random rotations as 3x3 rotation matrices. + Args: + n: Number of rotation matrices in a batch to return. + dtype: Type to return. + device: Device of returned tensor. Default: if None, + uses the current device for the default tensor type. + requires_grad: Whether the resulting tensor should have the gradient + flag set. + Returns: + Rotation matrices as tensor of shape (n, 3, 3). + ''' + quaternions = random_quaternions( + n, dtype=dtype, device=device, requires_grad=requires_grad + ) + return quaternion_to_matrix(quaternions) + + +def random_rotation( + dtype: Optional[torch.dtype] = None, device=None, requires_grad=False +): + ''' + Generate a single random 3x3 rotation matrix. + Args: + dtype: Type to return + device: Device of returned tensor. 
Default: if None, + uses the current device for the default tensor type + requires_grad: Whether the resulting tensor should have the gradient + flag set + Returns: + Rotation matrix as tensor of shape (3, 3). + ''' + return random_rotations(1, dtype, device, requires_grad)[0] + + +def standardize_quaternion(quaternions): + ''' + Convert a unit quaternion to a standard form: one in which the real + part is non negative. + Args: + quaternions: Quaternions with real part first, + as tensor of shape (..., 4). + Returns: + Standardized quaternions as tensor of shape (..., 4). + ''' + return torch.where(quaternions[..., 0:1] < 0, -quaternions, quaternions) + + +def quaternion_raw_multiply(a, b): + ''' + Multiply two quaternions. + Usual torch rules for broadcasting apply. + Args: + a: Quaternions as tensor of shape (..., 4), real part first. + b: Quaternions as tensor of shape (..., 4), real part first. + Returns: + The product of a and b, a tensor of quaternions shape (..., 4). + ''' + aw, ax, ay, az = torch.unbind(a, -1) + bw, bx, by, bz = torch.unbind(b, -1) + ow = aw * bw - ax * bx - ay * by - az * bz + ox = aw * bx + ax * bw + ay * bz - az * by + oy = aw * by - ax * bz + ay * bw + az * bx + oz = aw * bz + ax * by - ay * bx + az * bw + return torch.stack((ow, ox, oy, oz), -1) + + +def quaternion_multiply(a, b): + ''' + Multiply two quaternions representing rotations, returning the quaternion + representing their composition, i.e. the versor with nonnegative real part. + Usual torch rules for broadcasting apply. + Args: + a: Quaternions as tensor of shape (..., 4), real part first. + b: Quaternions as tensor of shape (..., 4), real part first. + Returns: + The product of a and b, a tensor of quaternions of shape (..., 4). + ''' + ab = quaternion_raw_multiply(a, b) + return standardize_quaternion(ab) + + +def quaternion_invert(quaternion): + ''' + Given a quaternion representing rotation, get the quaternion representing + its inverse. + Args: + quaternion: Quaternions as tensor of shape (..., 4), with real part + first, which must be versors (unit quaternions). + Returns: + The inverse, a tensor of quaternions of shape (..., 4). + ''' + + return quaternion * quaternion.new_tensor([1, -1, -1, -1]) + + +def quaternion_apply(quaternion, point): + ''' + Apply the rotation given by a quaternion to a 3D point. + Usual torch rules for broadcasting apply. + Args: + quaternion: Tensor of quaternions, real part first, of shape (..., 4). + point: Tensor of 3D points of shape (..., 3). + Returns: + Tensor of rotated points of shape (..., 3). + ''' + if point.size(-1) != 3: + raise ValueError(f"Points are not in 3D, f{point.shape}.") + real_parts = point.new_zeros(point.shape[:-1] + (1,)) + point_as_quaternion = torch.cat((real_parts, point), -1) + out = quaternion_raw_multiply( + quaternion_raw_multiply(quaternion, point_as_quaternion), + quaternion_invert(quaternion), + ) + return out[..., 1:] + + +def axis_angle_to_matrix(axis_angle): + ''' + Convert rotations given as axis/angle to rotation matrices. + Args: + axis_angle: Rotations given as a vector in axis angle form, + as a tensor of shape (..., 3), where the magnitude is + the angle turned anticlockwise in radians around the + vector's direction. + Returns: + Rotation matrices as tensor of shape (..., 3, 3). + ''' + return quaternion_to_matrix(axis_angle_to_quaternion(axis_angle)) + + +def matrix_to_axis_angle(matrix): + ''' + Convert rotations given as rotation matrices to axis/angle. 
+ Args: + matrix: Rotation matrices as tensor of shape (..., 3, 3). + Returns: + Rotations given as a vector in axis angle form, as a tensor + of shape (..., 3), where the magnitude is the angle + turned anticlockwise in radians around the vector's + direction. + ''' + return quaternion_to_axis_angle(matrix_to_quaternion(matrix)) + + +def axis_angle_to_quaternion(axis_angle): + ''' + Convert rotations given as axis/angle to quaternions. + Args: + axis_angle: Rotations given as a vector in axis angle form, + as a tensor of shape (..., 3), where the magnitude is + the angle turned anticlockwise in radians around the + vector's direction. + Returns: + quaternions with real part first, as tensor of shape (..., 4). + ''' + angles = torch.norm(axis_angle, p=2, dim=-1, keepdim=True) + half_angles = 0.5 * angles + eps = 1e-6 + small_angles = angles.abs() < eps + sin_half_angles_over_angles = torch.empty_like(angles) + sin_half_angles_over_angles[~small_angles] = ( + torch.sin(half_angles[~small_angles]) / angles[~small_angles] + ) + # for x small, sin(x/2) is about x/2 - (x/2)^3/6 + # so sin(x/2)/x is about 1/2 - (x*x)/48 + sin_half_angles_over_angles[small_angles] = ( + 0.5 - (angles[small_angles] * angles[small_angles]) / 48 + ) + quaternions = torch.cat( + [torch.cos(half_angles), axis_angle * sin_half_angles_over_angles], dim=-1 + ) + return quaternions + + +def quaternion_to_axis_angle(quaternions): + ''' + Convert rotations given as quaternions to axis/angle. + Args: + quaternions: quaternions with real part first, + as tensor of shape (..., 4). + Returns: + Rotations given as a vector in axis angle form, as a tensor + of shape (..., 3), where the magnitude is the angle + turned anticlockwise in radians around the vector's + direction. + ''' + norms = torch.norm(quaternions[..., 1:], p=2, dim=-1, keepdim=True) + half_angles = torch.atan2(norms, quaternions[..., :1]) + angles = 2 * half_angles + eps = 1e-6 + small_angles = angles.abs() < eps + sin_half_angles_over_angles = torch.empty_like(angles) + sin_half_angles_over_angles[~small_angles] = ( + torch.sin(half_angles[~small_angles]) / angles[~small_angles] + ) + # for x small, sin(x/2) is about x/2 - (x/2)^3/6 + # so sin(x/2)/x is about 1/2 - (x*x)/48 + sin_half_angles_over_angles[small_angles] = ( + 0.5 - (angles[small_angles] * angles[small_angles]) / 48 + ) + return quaternions[..., 1:] / sin_half_angles_over_angles + + +def rotation_6d_to_matrix(d6: torch.Tensor) -> torch.Tensor: + ''' + Converts 6D rotation representation by Zhou et al. [1] to rotation matrix + using Gram--Schmidt orthogonalisation per Section B of [1]. + Args: + d6: 6D rotation representation, of size (*, 6) + Returns: + batch of rotation matrices of size (*, 3, 3) + [1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H. + On the Continuity of Rotation Representations in Neural Networks. + IEEE Conference on Computer Vision and Pattern Recognition, 2019. + Retrieved from http://arxiv.org/abs/1812.07035 + ''' + + a1, a2 = d6[..., :3], d6[..., 3:] + b1 = F.normalize(a1, dim=-1) + b2 = a2 - (b1 * a2).sum(-1, keepdim=True) * b1 + b2 = F.normalize(b2, dim=-1) + b3 = torch.cross(b1, b2, dim=-1) + return torch.stack((b1, b2, b3), dim=-2) + + +def matrix_to_rotation_6d(matrix: torch.Tensor) -> torch.Tensor: + ''' + Converts rotation matrices to 6D rotation representation by Zhou et al. [1] + by dropping the last row. Note that 6D representation is not unique. 
+ Args: + matrix: batch of rotation matrices of size (*, 3, 3) + Returns: + 6D rotation representation, of size (*, 6) + [1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H. + On the Continuity of Rotation Representations in Neural Networks. + IEEE Conference on Computer Vision and Pattern Recognition, 2019. + Retrieved from http://arxiv.org/abs/1812.07035 + ''' + return matrix[..., :2, :].clone().reshape(*matrix.size()[:-2], 6) + + +def canonicalize_smplh(poses, trans=None): + bs, nframes, njoints = poses.shape[:3] + + global_orient = poses[:, :, 0] + + # first global rotations + rot2d = matrix_to_axis_angle(global_orient[:, 0]) + # rot2d[:, :2] = 0 # Remove the rotation along the vertical axis + rot2d = axis_angle_to_matrix(rot2d) + + # Rotate the global rotation to eliminate Z rotations + global_orient = torch.einsum("ikj,imkl->imjl", rot2d, global_orient) + + # Construct canonicalized version of x + xc = torch.cat((global_orient[:, :, None], poses[:, :, 1:]), dim=2) + + if trans is not None: + vel = trans[:, 1:] - trans[:, :-1] + + vel = torch.einsum("ikj,ilk->ilj", rot2d, vel) + trans = torch.cat((torch.zeros(bs, 1, 3, device=vel.device), + torch.cumsum(vel, 1)), 1) + return xc, trans + else: + return xc + + + +def matrix_of_angles(cos, sin, inv=False, dim=2): + assert dim in [2, 3] + sin = -sin if inv else sin + if dim == 2: + row1 = torch.stack((cos, -sin), axis=-1) + row2 = torch.stack((sin, cos), axis=-1) + return torch.stack((row1, row2), axis=-2) + elif dim == 3: + row1 = torch.stack((cos, -sin, 0 * cos), axis=-1) + row2 = torch.stack((sin, cos, 0 * cos), axis=-1) + row3 = torch.stack((0 * sin, 0 * cos, 1 + 0 * cos), axis=-1) + return torch.stack((row1, row2, row3), axis=-2) + diff --git a/utils/human_models.py b/utils/human_models.py new file mode 100644 index 0000000000000000000000000000000000000000..daf87e7029eca1a7bf090c04a63135431dff2146 --- /dev/null +++ b/utils/human_models.py @@ -0,0 +1,241 @@ +import numpy as np +import torch +import os.path as osp +from .config import HUMAN_MODEL_PATH +from utils.smplx import smplx +import pickle +from utils.transforms import transform_joint_to_other_db + +class SMPLX(object): + def __init__(self): + self.layer_arg = {'create_global_orient': False, 'create_body_pose': False, 'create_left_hand_pose': False, 'create_right_hand_pose': False, 'create_jaw_pose': False, 'create_leye_pose': False, 'create_reye_pose': False, 'create_betas': False, 'create_expression': False, 'create_transl': False} + self.layer = {'neutral': smplx.create(HUMAN_MODEL_PATH, 'smplx', gender='NEUTRAL', use_pca=False, use_face_contour=True, **self.layer_arg), + 'male': smplx.create(HUMAN_MODEL_PATH, 'smplx', gender='MALE', use_pca=False, use_face_contour=True, **self.layer_arg), + 'female': smplx.create(HUMAN_MODEL_PATH, 'smplx', gender='FEMALE', use_pca=False, use_face_contour=True, **self.layer_arg) + } + self.vertex_num = 10475 + self.face = self.layer['neutral'].faces + self.shape_param_dim = 10 + self.expr_code_dim = 10 + with open(osp.join(HUMAN_MODEL_PATH, 'smplx', 'SMPLX_to_J14.pkl'), 'rb') as f: + self.j14_regressor = pickle.load(f, encoding='latin1') + with open(osp.join(HUMAN_MODEL_PATH, 'smplx', 'MANO_SMPLX_vertex_ids.pkl'), 'rb') as f: + self.hand_vertex_idx = pickle.load(f, encoding='latin1') + self.face_vertex_idx = np.load(osp.join(HUMAN_MODEL_PATH, 'smplx', 'SMPL-X__FLAME_vertex_ids.npy')) + self.J_regressor = self.layer['neutral'].J_regressor.numpy() + self.J_regressor_idx = {'pelvis': 0, 'lwrist': 20, 'rwrist': 21, 'neck': 12} + 
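+        # Assemble 21-row per-hand regressors: J_regressor rows for the wrist and
+        # 15 finger joints, plus one-hot vertex selectors for the 5 fingertips
+        # (see make_hand_regressor below).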
self.orig_hand_regressor = self.make_hand_regressor() + #self.orig_hand_regressor = {'left': self.layer.J_regressor.numpy()[[20,37,38,39,25,26,27,28,29,30,34,35,36,31,32,33],:], 'right': self.layer.J_regressor.numpy()[[21,52,53,54,40,41,42,43,44,45,49,50,51,46,47,48],:]} + + # original SMPLX joint set + self.orig_joint_num = 53 # 22 (body joints) + 30 (hand joints) + 1 (face jaw joint) + self.orig_joints_name = \ + ('Pelvis', 'L_Hip', 'R_Hip', 'Spine_1', 'L_Knee', 'R_Knee', 'Spine_2', 'L_Ankle', 'R_Ankle', 'Spine_3', 'L_Foot', 'R_Foot', 'Neck', 'L_Collar', 'R_Collar', 'Head', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', # body joints + 'L_Index_1', 'L_Index_2', 'L_Index_3', 'L_Middle_1', 'L_Middle_2', 'L_Middle_3', 'L_Pinky_1', 'L_Pinky_2', 'L_Pinky_3', 'L_Ring_1', 'L_Ring_2', 'L_Ring_3', 'L_Thumb_1', 'L_Thumb_2', 'L_Thumb_3', # left hand joints + 'R_Index_1', 'R_Index_2', 'R_Index_3', 'R_Middle_1', 'R_Middle_2', 'R_Middle_3', 'R_Pinky_1', 'R_Pinky_2', 'R_Pinky_3', 'R_Ring_1', 'R_Ring_2', 'R_Ring_3', 'R_Thumb_1', 'R_Thumb_2', 'R_Thumb_3', # right hand joints + 'Jaw' # face jaw joint + ) + self.orig_flip_pairs = \ + ( (1,2), (4,5), (7,8), (10,11), (13,14), (16,17), (18,19), (20,21), # body joints + (22,37), (23,38), (24,39), (25,40), (26,41), (27,42), (28,43), (29,44), (30,45), (31,46), (32,47), (33,48), (34,49), (35,50), (36,51) # hand joints + ) + self.orig_root_joint_idx = self.orig_joints_name.index('Pelvis') + self.orig_joint_part = \ + {'body': range(self.orig_joints_name.index('Pelvis'), self.orig_joints_name.index('R_Wrist')+1), + 'lhand': range(self.orig_joints_name.index('L_Index_1'), self.orig_joints_name.index('L_Thumb_3')+1), + 'rhand': range(self.orig_joints_name.index('R_Index_1'), self.orig_joints_name.index('R_Thumb_3')+1), + 'face': range(self.orig_joints_name.index('Jaw'), self.orig_joints_name.index('Jaw')+1)} + + # changed SMPLX joint set for the supervision + self.joint_num = 137 # 25 (body joints) + 40 (hand joints) + 72 (face keypoints) + self.joints_name = \ + ('Pelvis', 'L_Hip', 'R_Hip', 'L_Knee', 'R_Knee', 'L_Ankle', 'R_Ankle', 'Neck', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', 'L_Big_toe', 'L_Small_toe', 'L_Heel', 'R_Big_toe', 'R_Small_toe', 'R_Heel', 'L_Ear', 'R_Ear', 'L_Eye', 'R_Eye', 'Nose',# body joints + 'L_Thumb_1', 'L_Thumb_2', 'L_Thumb_3', 'L_Thumb_4', 'L_Index_1', 'L_Index_2', 'L_Index_3', 'L_Index_4', 'L_Middle_1', 'L_Middle_2', 'L_Middle_3', 'L_Middle_4', 'L_Ring_1', 'L_Ring_2', 'L_Ring_3', 'L_Ring_4', 'L_Pinky_1', 'L_Pinky_2', 'L_Pinky_3', 'L_Pinky_4', # left hand joints + 'R_Thumb_1', 'R_Thumb_2', 'R_Thumb_3', 'R_Thumb_4', 'R_Index_1', 'R_Index_2', 'R_Index_3', 'R_Index_4', 'R_Middle_1', 'R_Middle_2', 'R_Middle_3', 'R_Middle_4', 'R_Ring_1', 'R_Ring_2', 'R_Ring_3', 'R_Ring_4', 'R_Pinky_1', 'R_Pinky_2', 'R_Pinky_3', 'R_Pinky_4', # right hand joints + *['Face_' + str(i) for i in range(1,73)] # face keypoints (too many keypoints... omit real names. 
have same name of keypoints defined in FLAME class) + ) + self.root_joint_idx = self.joints_name.index('Pelvis') + self.lwrist_idx = self.joints_name.index('L_Wrist') + self.rwrist_idx = self.joints_name.index('R_Wrist') + self.neck_idx = self.joints_name.index('Neck') + self.flip_pairs = \ + ( (1,2), (3,4), (5,6), (8,9), (10,11), (12,13), (14,17), (15,18), (16,19), (20,21), (22,23), # body joints + (25,45), (26,46), (27,47), (28,48), (29,49), (30,50), (31,51), (32,52), (33,53), (34,54), (35,55), (36,56), (37,57), (38,58), (39,59), (40,60), (41,61), (42,62), (43,63), (44,64), # hand joints + (67,68), # face eyeballs + (69,78), (70,77), (71,76), (72,75), (73,74), # face eyebrow + (83,87), (84,86), # face below nose + (88,97), (89,96), (90,95), (91,94), (92,99), (93,98), # face eyes + (100,106), (101,105), (102,104), (107,111), (108,110), # face mouth + (112,116), (113,115), (117,119), # face lip + (120,136), (121,135), (122,134), (123,133), (124,132), (125,131), (126,130), (127,129) # face contours + ) + self.joint_idx = \ + (0,1,2,4,5,7,8,12,16,17,18,19,20,21,60,61,62,63,64,65,59,58,57,56,55, # body joints + 37,38,39,66,25,26,27,67,28,29,30,68,34,35,36,69,31,32,33,70, # left hand joints + 52,53,54,71,40,41,42,72,43,44,45,73,49,50,51,74,46,47,48,75, # right hand joints + 22,15, # jaw, head + 57,56, # eyeballs + 76,77,78,79,80,81,82,83,84,85, # eyebrow + 86,87,88,89, # nose + 90,91,92,93,94, # below nose + 95,96,97,98,99,100,101,102,103,104,105,106, # eyes + 107, # right mouth + 108,109,110,111,112, # upper mouth + 113, # left mouth + 114,115,116,117,118, # lower mouth + 119, # right lip + 120,121,122, # upper lip + 123, # left lip + 124,125,126, # lower lip + 127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143 # face contour + ) + self.joint_part = \ + {'body': range(self.joints_name.index('Pelvis'), self.joints_name.index('Nose')+1), + 'lhand': range(self.joints_name.index('L_Thumb_1'), self.joints_name.index('L_Pinky_4')+1), + 'rhand': range(self.joints_name.index('R_Thumb_1'), self.joints_name.index('R_Pinky_4')+1), + 'hand': range(self.joints_name.index('L_Thumb_1'), self.joints_name.index('R_Pinky_4')+1), + 'face': range(self.joints_name.index('Face_1'), self.joints_name.index('Face_72')+1)} + + # changed SMPLX joint set for PositionNet prediction + self.pos_joint_num = 65 # 25 (body joints) + 40 (hand joints) + self.pos_joints_name = \ + ('Pelvis', 'L_Hip', 'R_Hip', 'L_Knee', 'R_Knee', 'L_Ankle', 'R_Ankle', 'Neck', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', 'L_Big_toe', 'L_Small_toe', 'L_Heel', 'R_Big_toe', 'R_Small_toe', 'R_Heel', 'L_Ear', 'R_Ear', 'L_Eye', 'R_Eye', 'Nose', # body joints + 'L_Thumb_1', 'L_Thumb_2', 'L_Thumb_3', 'L_Thumb_4', 'L_Index_1', 'L_Index_2', 'L_Index_3', 'L_Index_4', 'L_Middle_1', 'L_Middle_2', 'L_Middle_3', 'L_Middle_4', 'L_Ring_1', 'L_Ring_2', 'L_Ring_3', 'L_Ring_4', 'L_Pinky_1', 'L_Pinky_2', 'L_Pinky_3', 'L_Pinky_4', # left hand joints + 'R_Thumb_1', 'R_Thumb_2', 'R_Thumb_3', 'R_Thumb_4', 'R_Index_1', 'R_Index_2', 'R_Index_3', 'R_Index_4', 'R_Middle_1', 'R_Middle_2', 'R_Middle_3', 'R_Middle_4', 'R_Ring_1', 'R_Ring_2', 'R_Ring_3', 'R_Ring_4', 'R_Pinky_1', 'R_Pinky_2', 'R_Pinky_3', 'R_Pinky_4', # right hand joints + ) + self.pos_joint_part = \ + {'body': range(self.pos_joints_name.index('Pelvis'), self.pos_joints_name.index('Nose')+1), + 'lhand': range(self.pos_joints_name.index('L_Thumb_1'), self.pos_joints_name.index('L_Pinky_4')+1), + 'rhand': range(self.pos_joints_name.index('R_Thumb_1'), 
self.pos_joints_name.index('R_Pinky_4')+1), + 'hand': range(self.pos_joints_name.index('L_Thumb_1'), self.pos_joints_name.index('R_Pinky_4')+1)} + self.pos_joint_part['L_MCP'] = [self.pos_joints_name.index('L_Index_1') - len(self.pos_joint_part['body']), + self.pos_joints_name.index('L_Middle_1') - len(self.pos_joint_part['body']), + self.pos_joints_name.index('L_Ring_1') - len(self.pos_joint_part['body']), + self.pos_joints_name.index('L_Pinky_1') - len(self.pos_joint_part['body'])] + self.pos_joint_part['R_MCP'] = [self.pos_joints_name.index('R_Index_1') - len(self.pos_joint_part['body']) - len(self.pos_joint_part['lhand']), + self.pos_joints_name.index('R_Middle_1') - len(self.pos_joint_part['body']) - len(self.pos_joint_part['lhand']), + self.pos_joints_name.index('R_Ring_1') - len(self.pos_joint_part['body']) - len(self.pos_joint_part['lhand']), + self.pos_joints_name.index('R_Pinky_1') - len(self.pos_joint_part['body']) - len(self.pos_joint_part['lhand'])] + + def make_hand_regressor(self): + regressor = self.layer['neutral'].J_regressor.numpy() + lhand_regressor = np.concatenate((regressor[[20,37,38,39],:], + np.eye(self.vertex_num)[5361,None], + regressor[[25,26,27],:], + np.eye(self.vertex_num)[4933,None], + regressor[[28,29,30],:], + np.eye(self.vertex_num)[5058,None], + regressor[[34,35,36],:], + np.eye(self.vertex_num)[5169,None], + regressor[[31,32,33],:], + np.eye(self.vertex_num)[5286,None])) + rhand_regressor = np.concatenate((regressor[[21,52,53,54],:], + np.eye(self.vertex_num)[8079,None], + regressor[[40,41,42],:], + np.eye(self.vertex_num)[7669,None], + regressor[[43,44,45],:], + np.eye(self.vertex_num)[7794,None], + regressor[[49,50,51],:], + np.eye(self.vertex_num)[7905,None], + regressor[[46,47,48],:], + np.eye(self.vertex_num)[8022,None])) + hand_regressor = {'left': lhand_regressor, 'right': rhand_regressor} + return hand_regressor + + + def reduce_joint_set(self, joint): + new_joint = [] + for name in self.pos_joints_name: + idx = self.joints_name.index(name) + new_joint.append(joint[:,idx,:]) + new_joint = torch.stack(new_joint,1) + return new_joint + +class SMPL(object): + def __init__(self): + self.layer_arg = {'create_body_pose': False, 'create_betas': False, 'create_global_orient': False, 'create_transl': False} + self.layer = {'neutral': smplx.create(HUMAN_MODEL_PATH, 'smpl', gender='NEUTRAL', **self.layer_arg), 'male': smplx.create(HUMAN_MODEL_PATH, 'smpl', gender='MALE', **self.layer_arg), 'female': smplx.create(HUMAN_MODEL_PATH, 'smpl', gender='FEMALE', **self.layer_arg)} + self.vertex_num = 6890 + self.face = self.layer['neutral'].faces + self.shape_param_dim = 10 + self.vposer_code_dim = 32 + + # original SMPL joint set + self.orig_joint_num = 24 + self.orig_joints_name = ('Pelvis', 'L_Hip', 'R_Hip', 'Spine_1', 'L_Knee', 'R_Knee', 'Spine_2', 'L_Ankle', 'R_Ankle', 'Spine_3', 'L_Foot', 'R_Foot', 'Neck', 'L_Collar', 'R_Collar', 'Head', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', 'L_Hand', 'R_Hand') + self.orig_flip_pairs = ( (1,2), (4,5), (7,8), (10,11), (13,14), (16,17), (18,19), (20,21), (22,23) ) + self.orig_root_joint_idx = self.orig_joints_name.index('Pelvis') + self.orig_joint_regressor = self.layer['neutral'].J_regressor.numpy().astype(np.float32) + + self.joint_num = self.orig_joint_num + self.joints_name = self.orig_joints_name + self.flip_pairs = self.orig_flip_pairs + self.root_joint_idx = self.orig_root_joint_idx + self.joint_regressor = self.orig_joint_regressor + +class MANO(object): + def __init__(self): + 
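+        # MANO hand model wrapper: loads left/right hand layers (use_pca=False)
+        # and extends the original 16 MANO joints with 5 fingertip joints
+        # regressed from mesh vertices, giving 21 joints in total.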
self.layer_arg = {'create_global_orient': False, 'create_hand_pose': False, 'create_betas': False, 'create_transl': False} + self.layer = {'right': smplx.create(HUMAN_MODEL_PATH, 'mano', is_rhand=True, use_pca=False, flat_hand_mean=False, **self.layer_arg), + 'left': smplx.create(HUMAN_MODEL_PATH, 'mano', is_rhand=False, use_pca=False, flat_hand_mean=False, **self.layer_arg)} + self.vertex_num = 778 + self.face = {'right': self.layer['right'].faces, 'left': self.layer['left'].faces} + self.shape_param_dim = 10 + + if torch.sum(torch.abs(self.layer['left'].shapedirs[:,0,:] - self.layer['right'].shapedirs[:,0,:])) < 1: + print('Fix shapedirs bug of MANO') + self.layer['left'].shapedirs[:,0,:] *= -1 + + # original MANO joint set + self.orig_joint_num = 16 + self.orig_joints_name = ('Wrist', 'Index_1', 'Index_2', 'Index_3', 'Middle_1', 'Middle_2', 'Middle_3', 'Pinky_1', 'Pinky_2', 'Pinky_3', 'Ring_1', 'Ring_2', 'Ring_3', 'Thumb_1', 'Thumb_2', 'Thumb_3') + self.orig_root_joint_idx = self.orig_joints_name.index('Wrist') + self.orig_flip_pairs = () + self.orig_joint_regressor = self.layer['right'].J_regressor.numpy() # same for the right and left hands + + # changed MANO joint set + self.joint_num = 21 # manually added fingertips + self.joints_name = ('Wrist', 'Thumb_1', 'Thumb_2', 'Thumb_3', 'Thumb_4', 'Index_1', 'Index_2', 'Index_3', 'Index_4', 'Middle_1', 'Middle_2', 'Middle_3', 'Middle_4', 'Ring_1', 'Ring_2', 'Ring_3', 'Ring_4', 'Pinky_1', 'Pinky_2', 'Pinky_3', 'Pinky_4') + self.skeleton = ( (0,1), (0,5), (0,9), (0,13), (0,17), (1,2), (2,3), (3,4), (5,6), (6,7), (7,8), (9,10), (10,11), (11,12), (13,14), (14,15), (15,16), (17,18), (18,19), (19,20) ) + self.root_joint_idx = self.joints_name.index('Wrist') + self.flip_pairs = () + # add fingertips to joint_regressor + self.joint_regressor = transform_joint_to_other_db(self.orig_joint_regressor, self.orig_joints_name, self.joints_name) + self.joint_regressor[self.joints_name.index('Thumb_4')] = np.array([1 if i == 745 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1,-1) + self.joint_regressor[self.joints_name.index('Index_4')] = np.array([1 if i == 317 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1,-1) + self.joint_regressor[self.joints_name.index('Middle_4')] = np.array([1 if i == 445 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1,-1) + self.joint_regressor[self.joints_name.index('Ring_4')] = np.array([1 if i == 556 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1,-1) + self.joint_regressor[self.joints_name.index('Pinky_4')] = np.array([1 if i == 673 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1,-1) + +class FLAME(object): + def __init__(self): + self.layer_arg = {'create_betas': False, 'create_expression': False, 'create_global_orient': False, 'create_neck_pose': False, 'create_jaw_pose': False, 'create_leye_pose': False, 'create_reye_pose': False, 'create_transl': False} + self.layer = smplx.create(HUMAN_MODEL_PATH, 'flame', use_face_contour=True, **self.layer_arg) + self.vertex_num = 5023 + self.face = self.layer.faces + self.shape_param_dim = 10 + self.expr_code_dim = 10 + + # FLAME joint set + self.orig_joint_num = 73 + self.orig_flip_pairs = ( (3,4), # eyeballs + (5,14), (6,13), (7,12), (8,11), (9,10), # eyebrow + (19,23), (20,22), # below nose + (24,33), (25,32), (26,31), (27,30), (28,35), (29,34), # eyes + (36,42), (37,41), (38,40), (43,47), (44,46), # mouth + 
+                    (48,52), (49,51), (53,55), # lip
+                    (56,72), (57,71), (58,70), (59,69), (60,68), (61,67), (62,66), (63,65) # face contours
+                    )
+        self.orig_joints_name = [str(i) for i in range(self.orig_joint_num)]
+        self.orig_root_joint_idx = 0
+
+        # changed FLAME joint set
+        self.joint_num = self.orig_joint_num
+        self.flip_pairs = self.orig_flip_pairs
+        self.joints_name = self.orig_joints_name
+        self.root_joint_idx = self.orig_root_joint_idx
+
+smpl_x = SMPLX()
+smpl = SMPL()
+mano = MANO()
+# flame = FLAME()
\ No newline at end of file
diff --git a/utils/losses.py b/utils/losses.py
new file mode 100644
index 0000000000000000000000000000000000000000..b90ebee62a5d82a047cdf7adf614a5380acb426a
--- /dev/null
+++ b/utils/losses.py
@@ -0,0 +1,36 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import numpy as np
+
+class ReConsLoss(nn.Module):
+    def __init__(self, motion_dim=272):
+        super(ReConsLoss, self).__init__()
+        self.motion_dim = motion_dim
+
+    def softclip(self, tensor, min):
+        # Smoothly clamp `tensor` from below at `min` via softplus.
+        result_tensor = min + F.softplus(tensor - min)
+        return result_tensor
+
+    def gaussian_nll(self, mu, log_sigma, x):
+        # Elementwise Gaussian negative log-likelihood with sigma = exp(log_sigma).
+        return 0.5 * torch.pow((x - mu) / log_sigma.exp(), 2) + log_sigma + 0.5 * np.log(2 * np.pi)
+
+    def forward(self, motion_pred, motion_gt):
+        """Optimal-sigma VAE loss: sigma is set to its maximum-likelihood estimate
+        (the RMS reconstruction error); see https://arxiv.org/pdf/2006.13202 for details."""
+        log_sigma = ((motion_gt[..., :self.motion_dim] - motion_pred[..., :self.motion_dim]) ** 2).mean([0,1,2], keepdim=True).sqrt().log()
+        log_sigma = self.softclip(log_sigma, -6)
+        loss = self.gaussian_nll(motion_pred[..., :self.motion_dim], log_sigma, motion_gt[..., :self.motion_dim]).sum()
+        return loss
+
+    def forward_KL(self, mu, logvar):
+        loss = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=(1, 2))
+        return loss.mean()
+
+    def forward_root(self, motion_pred, motion_gt):
+        """Channels [..., :8] relate to the root joint."""
+        root_log_sigma = ((motion_gt[..., :8] - motion_pred[..., :8]) ** 2).mean([0,1,2], keepdim=True).sqrt().log()
+        root_log_sigma = self.softclip(root_log_sigma, -6)
+        root_loss = self.gaussian_nll(motion_pred[..., :8], root_log_sigma, motion_gt[..., :8]).sum()
+        return root_loss
+
diff --git a/utils/paramUtil.py b/utils/paramUtil.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9f1708b85ca80a9051cb3675cec9b999a0d0e2b
--- /dev/null
+++ b/utils/paramUtil.py
@@ -0,0 +1,63 @@
+import numpy as np
+
+# Define a kinematic tree for the skeletal structure
+kit_kinematic_chain = [[0, 11, 12, 13, 14, 15], [0, 16, 17, 18, 19, 20], [0, 1, 2, 3, 4], [3, 5, 6, 7], [3, 8, 9, 10]]
+
+kit_raw_offsets = np.array(
+    [
+        [0, 0, 0],
+        [0, 1, 0],
+        [0, 1, 0],
+        [0, 1, 0],
+        [0, 1, 0],
+        [1, 0, 0],
+        [0, -1, 0],
+        [0, -1, 0],
+        [-1, 0, 0],
+        [0, -1, 0],
+        [0, -1, 0],
+        [1, 0, 0],
+        [0, -1, 0],
+        [0, -1, 0],
+        [0, 0, 1],
+        [0, 0, 1],
+        [-1, 0, 0],
+        [0, -1, 0],
+        [0, -1, 0],
+        [0, 0, 1],
+        [0, 0, 1]
+    ]
+)
+
+t2m_raw_offsets = np.array([[0,0,0],
+                           [1,0,0],
+                           [-1,0,0],
+                           [0,1,0],
+                           [0,-1,0],
+                           [0,-1,0],
+                           [0,1,0],
+                           [0,-1,0],
+                           [0,-1,0],
+                           [0,1,0],
+                           [0,0,1],
+                           [0,0,1],
+                           [0,1,0],
+                           [1,0,0],
+                           [-1,0,0],
+                           [0,0,1],
+                           [0,-1,0],
+                           [0,-1,0],
+                           [0,-1,0],
+                           [0,-1,0],
+                           [0,-1,0],
+                           [0,-1,0]])
+
+t2m_kinematic_chain = [[0, 2, 5, 8, 11], [0, 1, 4, 7, 10], [0, 3, 6, 9, 12, 15], [9, 14, 17, 19, 21], [9, 13, 16, 18, 20]]
+t2m_left_hand_chain = [[20, 22, 23, 24], [20, 34, 35, 36], [20, 25, 26, 27], [20, 31, 32, 33], [20, 28, 29, 30]]
+t2m_right_hand_chain = [[21, 43, 44, 45], [21, 46, 47, 48], [21, 40, 41, 42], [21, 37, 38, 39], [21, 49, 50, 51]]
+
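+# Example (illustrative sketch, assuming a hypothetical (22, 3) array `joints`
+# of T2M body-joint positions): each chain lists joint indices from the root
+# outwards, so bone vectors can be read off pairwise:
+#
+#     for chain in t2m_kinematic_chain:
+#         for parent, child in zip(chain[:-1], chain[1:]):
+#             bone = joints[child] - joints[parent]  # bone vector along the chain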
+kit_tgt_skel_id = '03950'
+
+t2m_tgt_skel_id = '000021'
+
diff --git a/utils/quat.py b/utils/quat.py
new file mode 100644
index 0000000000000000000000000000000000000000..e703289a82498c0480bc369fb18264778086f309
--- /dev/null
+++ b/utils/quat.py
@@ -0,0 +1,321 @@
+import numpy as np
+
+# Calculate the cross product of two 3D vectors.
+def _fast_cross(a, b):
+    return np.concatenate([
+        a[...,1:2]*b[...,2:3] - a[...,2:3]*b[...,1:2],
+        a[...,2:3]*b[...,0:1] - a[...,0:1]*b[...,2:3],
+        a[...,0:1]*b[...,1:2] - a[...,1:2]*b[...,0:1]], axis=-1)
+
+# Make identity quaternions (no rotation).
+def eye(shape, dtype=np.float32):
+    return np.ones(list(shape) + [4], dtype=dtype) * np.asarray([1, 0, 0, 0], dtype=dtype)
+
+# Return the norm of quaternions.
+def length(x):
+    return np.sqrt(np.sum(x * x, axis=-1))
+
+# Make unit quaternions.
+def normalize(x, eps=1e-8):
+    return x / (length(x)[...,None] + eps)
+
+def abs(x):
+    return np.where(x[...,0:1] > 0.0, x, -x)
+
+# Calculate inverse rotations.
+def inv(q):
+    return np.array([1, -1, -1, -1], dtype=np.float32) * q
+
+# Calculate the dot product of two quaternions.
+def dot(x, y):
+    return np.sum(x * y, axis=-1)[...,None] if x.ndim > 1 else np.sum(x * y, axis=-1)
+
+# Multiply two quaternions (composition of rotations).
+def mul(x, y):
+    x0, x1, x2, x3 = x[..., 0:1], x[..., 1:2], x[..., 2:3], x[..., 3:4]
+    y0, y1, y2, y3 = y[..., 0:1], y[..., 1:2], y[..., 2:3], y[..., 3:4]
+
+    return np.concatenate([
+        y0 * x0 - y1 * x1 - y2 * x2 - y3 * x3,
+        y0 * x1 + y1 * x0 - y2 * x3 + y3 * x2,
+        y0 * x2 + y1 * x3 + y2 * x0 - y3 * x1,
+        y0 * x3 - y1 * x2 + y2 * x1 + y3 * x0], axis=-1)
+
+def inv_mul(x, y):
+    return mul(inv(x), y)
+
+def mul_inv(x, y):
+    return mul(x, inv(y))
+
+# Rotate vectors by quaternions (returns vectors).
+def mul_vec(q, x):
+    t = 2.0 * _fast_cross(q[..., 1:], x)
+    return x + q[..., 0][..., None] * t + _fast_cross(q[..., 1:], t)
+
+def inv_mul_vec(q, x):
+    return mul_vec(inv(q), x)
+
+# Flip signs along the time axis so consecutive quaternions lie on the same hemisphere.
+def unroll(x):
+    y = x.copy()
+    for i in range(1, len(x)):
+        d0 = np.sum( y[i] * y[i-1], axis=-1)
+        d1 = np.sum(-y[i] * y[i-1], axis=-1)
+        y[i][d0 < d1] = -y[i][d0 < d1]
+    return y
+
+# Calculate the (unnormalized) quaternion rotating vector x onto vector y.
+def between(x, y):
+    return np.concatenate([
+        np.sqrt(np.sum(x*x, axis=-1) * np.sum(y*y, axis=-1))[...,None] +
+        np.sum(x * y, axis=-1)[...,None],
+        _fast_cross(x, y)], axis=-1)
+
+def log(x, eps=1e-5):
+    length = np.sqrt(np.sum(np.square(x[...,1:]), axis=-1))[...,None]
+    halfangle = np.where(length < eps, np.ones_like(length), np.arctan2(length, x[...,0:1]) / length)
+    return halfangle * x[...,1:]
+
+def exp(x, eps=1e-5):
+    halfangle = np.sqrt(np.sum(np.square(x), axis=-1))[...,None]
+    c = np.where(halfangle < eps, np.ones_like(halfangle), np.cos(halfangle))
+    s = np.where(halfangle < eps, np.ones_like(halfangle), np.sinc(halfangle / np.pi))
+    return np.concatenate([c, s * x], axis=-1)
+
+# Calculate global space rotations and positions from local space.
+def fk(lrot, lpos, parents):
+
+    gp, gr = [lpos[...,:1,:]], [lrot[...,:1,:]]
+    for i in range(1, len(parents)):
+        gp.append(mul_vec(gr[parents[i]], lpos[...,i:i+1,:]) + gp[parents[i]])
+        gr.append(mul(gr[parents[i]], lrot[...,i:i+1,:]))
+
+    return np.concatenate(gr, axis=-2), np.concatenate(gp, axis=-2)
+
+def fk_rot(lrot, parents):
+
+    gr = [lrot[...,:1,:]]
+    for i in range(1, len(parents)):
+        gr.append(mul(gr[parents[i]], lrot[...,i:i+1,:]))
+
+    return np.concatenate(gr, axis=-2)
+
+# Calculate local space rotations and positions from global space.
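+# (ik inverts fk: each local rotation is recovered as inv(parent_global) * child_global,
+# and each local offset by rotating the global position delta back into the
+# parent's frame.)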
+def ik(grot, gpos, parents): + + return ( + np.concatenate([ + grot[...,:1,:], + mul(inv(grot[...,parents[1:],:]), grot[...,1:,:]), + ], axis=-2), + np.concatenate([ + gpos[...,:1,:], + mul_vec( + inv(grot[...,parents[1:],:]), + gpos[...,1:,:] - gpos[...,parents[1:],:]), + ], axis=-2)) + +def ik_rot(grot, parents): + + return np.concatenate([grot[...,:1,:], + mul(inv(grot[...,parents[1:],:]), grot[...,1:,:]), + ], axis=-2) + +def fk_vel(lrot, lpos, lvel, lang, parents): + + gp, gr, gv, ga = [lpos[...,:1,:]], [lrot[...,:1,:]], [lvel[...,:1,:]], [lang[...,:1,:]] + for i in range(1, len(parents)): + gp.append(mul_vec(gr[parents[i]], lpos[...,i:i+1,:]) + gp[parents[i]]) + gr.append(mul (gr[parents[i]], lrot[...,i:i+1,:])) + gv.append(mul_vec(gr[parents[i]], lvel[...,i:i+1,:]) + + _fast_cross(ga[parents[i]], mul_vec(gr[parents[i]], lpos[...,i:i+1,:])) + + gv[parents[i]]) + ga.append(mul_vec(gr[parents[i]], lang[...,i:i+1,:]) + ga[parents[i]]) + + return ( + np.concatenate(gr, axis=-2), + np.concatenate(gp, axis=-2), + np.concatenate(gv, axis=-2), + np.concatenate(ga, axis=-2)) + +# Linear Interpolation of two vectors +def lerp(x, y, t): + return (1 - t) * x + t * y + +# LERP of quaternions +def quat_lerp(x, y, t): + return normalize(lerp(x, y, t)) + +# Spherical linear interpolation of quaternions +def slerp(x, y, t): + if t == 0: + return x + elif t == 1: + return y + + if dot(x, y) < 0: + y = - y + ca = dot(x, y) + theta = np.arccos(np.clip(ca, 0, 1)) + + r = normalize(y - x * ca) + + return x * np.cos(theta * t) + r * np.sin(theta * t) + + +################################################### +# Calculate other rotations from other quaternions. +################################################### + +# Calculate euler angles from quaternions. +def to_euler(x, order='zyx'): + + q0 = x[...,0:1] + q1 = x[...,1:2] + q2 = x[...,2:3] + q3 = x[...,3:4] + + if order == 'zyx': + + return np.concatenate([ + np.arctan2(2 * (q0 * q3 + q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3)), + np.arcsin((2 * (q0 * q2 - q3 * q1)).clip(-1,1)), + np.arctan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2))], axis=-1) + + elif order == 'yzx': + + return np.concatenate([ + np.arctan2(2 * (q2 * q0 - q1 * q3), q1 * q1 - q2 * q2 - q3 * q3 + q0 * q0), + np.arcsin((2 * (q1 * q2 + q3 * q0)).clip(-1,1)), + np.arctan2(2 * (q1 * q0 - q2 * q3), -q1 * q1 + q2 * q2 - q3 * q3 + q0 * q0)],axis=-1) + + elif order == 'zxy': + + return np.concatenate([ + np.arctan2(2 * (q0 * q3 - q1 * q2), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3), + np.arcsin((2 * (q0 * q1 + q2 * q3)).clip(-1,1)), + np.arctan2(2 * (q0 * q2 - q1 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)], axis=-1) + + elif order == 'yxz': + + return np.concatenate([ + np.arctan2(2 * (q1 * q3 + q0 * q2), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3), + np.arcsin((2 * (q0 * q1 - q2 * q3)).clip(-1,1)), + np.arctan2(2 * (q1 * q2 + q0 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)], axis=-1) + + else: + raise NotImplementedError('Cannot convert from ordering %s' % order) + +# Calculate rotation matrix from quaternions. 
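+# (For a unit quaternion q = (w, v), this expands the standard identity
+# R(q) = I + 2*w*[v]x + 2*[v]x^2, where [v]x is the skew-symmetric
+# cross-product matrix of v.)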
+def to_xform(x):
+
+    qw, qx, qy, qz = x[...,0:1], x[...,1:2], x[...,2:3], x[...,3:4]
+
+    x2, y2, z2 = qx + qx, qy + qy, qz + qz
+    xx, yy, wx = qx * x2, qy * y2, qw * x2
+    xy, yz, wy = qx * y2, qy * z2, qw * y2
+    xz, zz, wz = qx * z2, qz * z2, qw * z2
+
+    return np.concatenate([
+        np.concatenate([1.0 - (yy + zz), xy - wz, xz + wy], axis=-1)[...,None,:],
+        np.concatenate([xy + wz, 1.0 - (xx + zz), yz - wx], axis=-1)[...,None,:],
+        np.concatenate([xz - wy, yz + wx, 1.0 - (xx + yy)], axis=-1)[...,None,:],
+    ], axis=-2)
+
+# Calculate 6d orthogonal rotation representation (ortho6d) from quaternions.
+# https://github.com/papagina/RotationContinuity
+def to_xform_xy(x):
+
+    qw, qx, qy, qz = x[...,0:1], x[...,1:2], x[...,2:3], x[...,3:4]
+
+    x2, y2, z2 = qx + qx, qy + qy, qz + qz
+    xx, yy, wx = qx * x2, qy * y2, qw * x2
+    xy, yz, wy = qx * y2, qy * z2, qw * y2
+    xz, zz, wz = qx * z2, qz * z2, qw * z2
+
+    return np.concatenate([
+        np.concatenate([1.0 - (yy + zz), xy - wz], axis=-1)[...,None,:],
+        np.concatenate([xy + wz, 1.0 - (xx + zz)], axis=-1)[...,None,:],
+        np.concatenate([xz - wy, yz + wx], axis=-1)[...,None,:],
+    ], axis=-2)
+
+# Calculate scaled angle axis from quaternions.
+def to_scaled_angle_axis(x, eps=1e-5):
+    return 2.0 * log(x, eps)
+
+
+#############################################
+# Calculate quaternions from other rotations.
+#############################################
+
+# Calculate quaternions from a separate angle and axis.
+def from_angle_axis(angle, axis):
+    c = np.cos(angle / 2.0)[..., None]
+    s = np.sin(angle / 2.0)[..., None]
+    q = np.concatenate([c, s * axis], axis=-1)
+    return q
+
+# Calculate quaternions from axis-angle vectors (angle encoded as the magnitude).
+def from_axis_angle(rots):
+    angle = np.linalg.norm(rots, axis=-1)
+    # Guard against division by zero for zero-angle (identity) rotations.
+    axis = rots / np.maximum(angle[...,None], 1e-10)
+    return from_angle_axis(angle, axis)
+
+# Calculate quaternions from euler angles.
+def from_euler(e, order='zyx'):
+    axis = {
+        'x': np.asarray([1, 0, 0], dtype=np.float32),
+        'y': np.asarray([0, 1, 0], dtype=np.float32),
+        'z': np.asarray([0, 0, 1], dtype=np.float32)}
+
+    q0 = from_angle_axis(e[..., 0], axis[order[0]])
+    q1 = from_angle_axis(e[..., 1], axis[order[1]])
+    q2 = from_angle_axis(e[..., 2], axis[order[2]])
+
+    return mul(q0, mul(q1, q2))
+
+# Calculate quaternions from rotation matrix.
+def from_xform(ts):
+
+    return normalize(
+        np.where((ts[...,2,2] < 0.0)[...,None],
+            np.where((ts[...,0,0] > ts[...,1,1])[...,None],
+                np.concatenate([
+                    (ts[...,2,1]-ts[...,1,2])[...,None],
+                    (1.0 + ts[...,0,0] - ts[...,1,1] - ts[...,2,2])[...,None],
+                    (ts[...,1,0]+ts[...,0,1])[...,None],
+                    (ts[...,0,2]+ts[...,2,0])[...,None]], axis=-1),
+                np.concatenate([
+                    (ts[...,0,2]-ts[...,2,0])[...,None],
+                    (ts[...,1,0]+ts[...,0,1])[...,None],
+                    (1.0 - ts[...,0,0] + ts[...,1,1] - ts[...,2,2])[...,None],
+                    (ts[...,2,1]+ts[...,1,2])[...,None]], axis=-1)),
+            np.where((ts[...,0,0] < -ts[...,1,1])[...,None],
+                np.concatenate([
+                    (ts[...,1,0]-ts[...,0,1])[...,None],
+                    (ts[...,0,2]+ts[...,2,0])[...,None],
+                    (ts[...,2,1]+ts[...,1,2])[...,None],
+                    (1.0 - ts[...,0,0] - ts[...,1,1] + ts[...,2,2])[...,None]], axis=-1),
+                np.concatenate([
+                    (1.0 + ts[...,0,0] + ts[...,1,1] + ts[...,2,2])[...,None],
+                    (ts[...,2,1]-ts[...,1,2])[...,None],
+                    (ts[...,0,2]-ts[...,2,0])[...,None],
+                    (ts[...,1,0]-ts[...,0,1])[...,None]], axis=-1)))))
+
+# Calculate quaternions from ortho6d.
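+# (Rebuilds an orthonormal frame from the two given columns: c2 = normalize(x0 x x1),
+# c1 = normalize(c2 x x0), c0 = x0, then converts with from_xform.)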
+def from_xform_xy(x): + + c2 = _fast_cross(x[...,0], x[...,1]) + c2 = c2 / np.sqrt(np.sum(np.square(c2), axis=-1))[...,None] + c1 = _fast_cross(c2, x[...,0]) + c1 = c1 / np.sqrt(np.sum(np.square(c1), axis=-1))[...,None] + c0 = x[...,0] + + return from_xform(np.concatenate([ + c0[...,None], + c1[...,None], + c2[...,None]], axis=-1)) + +# Calculate quaternions from scaled angle axis. +def from_scaled_angle_axis(x, eps=1e-5): + return exp(x / 2.0, eps) \ No newline at end of file diff --git a/utils/rotation_conversions.py b/utils/rotation_conversions.py new file mode 100644 index 0000000000000000000000000000000000000000..6ad08f5db949842788af052aed99941f75a7396e --- /dev/null +++ b/utils/rotation_conversions.py @@ -0,0 +1,547 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# Check PYTORCH3D_LICENCE before use + +import functools +from typing import Optional + +import torch +import torch.nn.functional as F + + +""" +The transformation matrices returned from the functions in this file assume +the points on which the transformation will be applied are column vectors. +i.e. the R matrix is structured as + R = [ + [Rxx, Rxy, Rxz], + [Ryx, Ryy, Ryz], + [Rzx, Rzy, Rzz], + ] # (3, 3) +This matrix can be applied to column vectors by post multiplication +by the points e.g. + points = [[0], [1], [2]] # (3 x 1) xyz coordinates of a point + transformed_points = R * points +To apply the same matrix to points which are row vectors, the R matrix +can be transposed and pre multiplied by the points: +e.g. + points = [[0, 1, 2]] # (1 x 3) xyz coordinates of a point + transformed_points = points * R.transpose(1, 0) +""" + + +def quaternion_to_matrix(quaternions): + """ + Convert rotations given as quaternions to rotation matrices. + Args: + quaternions: quaternions with real part first, + as tensor of shape (..., 4). + Returns: + Rotation matrices as tensor of shape (..., 3, 3). + """ + r, i, j, k = torch.unbind(quaternions, -1) + two_s = 2.0 / (quaternions * quaternions).sum(-1) + + o = torch.stack( + ( + 1 - two_s * (j * j + k * k), + two_s * (i * j - k * r), + two_s * (i * k + j * r), + two_s * (i * j + k * r), + 1 - two_s * (i * i + k * k), + two_s * (j * k - i * r), + two_s * (i * k - j * r), + two_s * (j * k + i * r), + 1 - two_s * (i * i + j * j), + ), + -1, + ) + return o.reshape(quaternions.shape[:-1] + (3, 3)) + + +def _copysign(a, b): + """ + Return a tensor where each element has the absolute value taken from the, + corresponding element of a, with sign taken from the corresponding + element of b. This is like the standard copysign floating-point operation, + but is not careful about negative 0 and NaN. + Args: + a: source tensor. + b: tensor whose signs will be used, of the same shape as a. + Returns: + Tensor of the same shape as a with the signs of b. + """ + signs_differ = (a < 0) != (b < 0) + return torch.where(signs_differ, -a, a) + + +def _sqrt_positive_part(x): + """ + Returns torch.sqrt(torch.max(0, x)) + but with a zero subgradient where x is 0. + """ + ret = torch.zeros_like(x) + positive_mask = x > 0 + ret[positive_mask] = torch.sqrt(x[positive_mask]) + return ret + + +def matrix_to_quaternion(matrix): + """ + Convert rotations given as rotation matrices to quaternions. + Args: + matrix: Rotation matrices as tensor of shape (..., 3, 3). + Returns: + quaternions with real part first, as tensor of shape (..., 4). 
+    """
+    if matrix.size(-1) != 3 or matrix.size(-2) != 3:
+        raise ValueError(f"Invalid rotation matrix shape {matrix.shape}.")
+    m00 = matrix[..., 0, 0]
+    m11 = matrix[..., 1, 1]
+    m22 = matrix[..., 2, 2]
+    o0 = 0.5 * _sqrt_positive_part(1 + m00 + m11 + m22)
+    x = 0.5 * _sqrt_positive_part(1 + m00 - m11 - m22)
+    y = 0.5 * _sqrt_positive_part(1 - m00 + m11 - m22)
+    z = 0.5 * _sqrt_positive_part(1 - m00 - m11 + m22)
+    o1 = _copysign(x, matrix[..., 2, 1] - matrix[..., 1, 2])
+    o2 = _copysign(y, matrix[..., 0, 2] - matrix[..., 2, 0])
+    o3 = _copysign(z, matrix[..., 1, 0] - matrix[..., 0, 1])
+    return torch.stack((o0, o1, o2, o3), -1)
+
+
+def _axis_angle_rotation(axis: str, angle):
+    """
+    Return the rotation matrices for rotations about one of the coordinate
+    axes used by Euler-angle conventions, for each value of the angle given.
+    Args:
+        axis: Axis label "X", "Y" or "Z".
+        angle: any shape tensor of Euler angles in radians
+    Returns:
+        Rotation matrices as tensor of shape (..., 3, 3).
+    """
+
+    cos = torch.cos(angle)
+    sin = torch.sin(angle)
+    one = torch.ones_like(angle)
+    zero = torch.zeros_like(angle)
+
+    if axis == "X":
+        R_flat = (one, zero, zero, zero, cos, -sin, zero, sin, cos)
+    if axis == "Y":
+        R_flat = (cos, zero, sin, zero, one, zero, -sin, zero, cos)
+    if axis == "Z":
+        R_flat = (cos, -sin, zero, sin, cos, zero, zero, zero, one)
+
+    return torch.stack(R_flat, -1).reshape(angle.shape + (3, 3))
+
+
+def euler_angles_to_matrix(euler_angles, convention: str):
+    """
+    Convert rotations given as Euler angles in radians to rotation matrices.
+    Args:
+        euler_angles: Euler angles in radians as tensor of shape (..., 3).
+        convention: Convention string of three uppercase letters from
+            {"X", "Y", and "Z"}.
+    Returns:
+        Rotation matrices as tensor of shape (..., 3, 3).
+    """
+    if euler_angles.dim() == 0 or euler_angles.shape[-1] != 3:
+        raise ValueError("Invalid input euler angles.")
+    if len(convention) != 3:
+        raise ValueError("Convention must have 3 letters.")
+    if convention[1] in (convention[0], convention[2]):
+        raise ValueError(f"Invalid convention {convention}.")
+    for letter in convention:
+        if letter not in ("X", "Y", "Z"):
+            raise ValueError(f"Invalid letter {letter} in convention string.")
+    matrices = map(_axis_angle_rotation, convention, torch.unbind(euler_angles, -1))
+    return functools.reduce(torch.matmul, matrices)
+
+
+def _angle_from_tan(
+    axis: str, other_axis: str, data, horizontal: bool, tait_bryan: bool
+):
+    """
+    Extract the first or third Euler angle from the two members of
+    the matrix which are positive constant times its sine and cosine.
+    Args:
+        axis: Axis label "X", "Y" or "Z" for the angle we are finding.
+        other_axis: Axis label "X", "Y" or "Z" for the middle axis in the
+            convention.
+        data: Rotation matrices as tensor of shape (..., 3, 3).
+        horizontal: Whether we are looking for the angle for the third axis,
+            which means the relevant entries are in the same row of the
+            rotation matrix. If not, they are in the same column.
+        tait_bryan: Whether the first and third axes in the convention differ.
+    Returns:
+        Euler angles in radians for each matrix in data as a tensor
+        of shape (...).
+    """
+
+    i1, i2 = {"X": (2, 1), "Y": (0, 2), "Z": (1, 0)}[axis]
+    if horizontal:
+        i2, i1 = i1, i2
+    even = (axis + other_axis) in ["XY", "YZ", "ZX"]
+    if horizontal == even:
+        return torch.atan2(data[..., i1], data[..., i2])
+    if tait_bryan:
+        return torch.atan2(-data[..., i2], data[..., i1])
+    return torch.atan2(data[..., i2], -data[..., i1])
+
+
+def _index_from_letter(letter: str):
+    if letter == "X":
+        return 0
+    if letter == "Y":
+        return 1
+    if letter == "Z":
+        return 2
+
+
+def matrix_to_euler_angles(matrix, convention: str):
+    """
+    Convert rotations given as rotation matrices to Euler angles in radians.
+    Args:
+        matrix: Rotation matrices as tensor of shape (..., 3, 3).
+        convention: Convention string of three uppercase letters.
+    Returns:
+        Euler angles in radians as tensor of shape (..., 3).
+    """
+    if len(convention) != 3:
+        raise ValueError("Convention must have 3 letters.")
+    if convention[1] in (convention[0], convention[2]):
+        raise ValueError(f"Invalid convention {convention}.")
+    for letter in convention:
+        if letter not in ("X", "Y", "Z"):
+            raise ValueError(f"Invalid letter {letter} in convention string.")
+    if matrix.size(-1) != 3 or matrix.size(-2) != 3:
+        raise ValueError(f"Invalid rotation matrix shape {matrix.shape}.")
+    i0 = _index_from_letter(convention[0])
+    i2 = _index_from_letter(convention[2])
+    tait_bryan = i0 != i2
+    if tait_bryan:
+        central_angle = torch.asin(
+            matrix[..., i0, i2] * (-1.0 if i0 - i2 in [-1, 2] else 1.0)
+        )
+    else:
+        central_angle = torch.acos(matrix[..., i0, i0])
+
+    o = (
+        _angle_from_tan(
+            convention[0], convention[1], matrix[..., i2], False, tait_bryan
+        ),
+        central_angle,
+        _angle_from_tan(
+            convention[2], convention[1], matrix[..., i0, :], True, tait_bryan
+        ),
+    )
+    return torch.stack(o, -1)
+
+
+def random_quaternions(
+    n: int, dtype: Optional[torch.dtype] = None, device=None, requires_grad=False
+):
+    """
+    Generate random quaternions representing rotations,
+    i.e. versors with nonnegative real part.
+    Args:
+        n: Number of quaternions in a batch to return.
+        dtype: Type to return.
+        device: Desired device of returned tensor. Default:
+            uses the current device for the default tensor type.
+        requires_grad: Whether the resulting tensor should have the gradient
+            flag set.
+    Returns:
+        Quaternions as tensor of shape (N, 4).
+    """
+    o = torch.randn((n, 4), dtype=dtype, device=device, requires_grad=requires_grad)
+    s = (o * o).sum(1)
+    o = o / _copysign(torch.sqrt(s), o[:, 0])[:, None]
+    return o
+
+
+def random_rotations(
+    n: int, dtype: Optional[torch.dtype] = None, device=None, requires_grad=False
+):
+    """
+    Generate random rotations as 3x3 rotation matrices.
+    Args:
+        n: Number of rotation matrices in a batch to return.
+        dtype: Type to return.
+        device: Device of returned tensor. Default: if None,
+            uses the current device for the default tensor type.
+        requires_grad: Whether the resulting tensor should have the gradient
+            flag set.
+    Returns:
+        Rotation matrices as tensor of shape (n, 3, 3).
+    """
+    quaternions = random_quaternions(
+        n, dtype=dtype, device=device, requires_grad=requires_grad
+    )
+    return quaternion_to_matrix(quaternions)
+
+
+def random_rotation(
+    dtype: Optional[torch.dtype] = None, device=None, requires_grad=False
+):
+    """
+    Generate a single random 3x3 rotation matrix.
+    Args:
+        dtype: Type to return
+        device: Device of returned tensor. Default: if None,
+            uses the current device for the default tensor type
+        requires_grad: Whether the resulting tensor should have the gradient
+            flag set
+    Returns:
+        Rotation matrix as tensor of shape (3, 3).
+    """
+    return random_rotations(1, dtype, device, requires_grad)[0]
+
+
+def standardize_quaternion(quaternions):
+    """
+    Convert a unit quaternion to a standard form: one in which the real
+    part is non negative.
+    Args:
+        quaternions: Quaternions with real part first,
+            as tensor of shape (..., 4).
+    Returns:
+        Standardized quaternions as tensor of shape (..., 4).
+    """
+    return torch.where(quaternions[..., 0:1] < 0, -quaternions, quaternions)
+
+
+def quaternion_raw_multiply(a, b):
+    """
+    Multiply two quaternions.
+    Usual torch rules for broadcasting apply.
+    Args:
+        a: Quaternions as tensor of shape (..., 4), real part first.
+        b: Quaternions as tensor of shape (..., 4), real part first.
+    Returns:
+        The product of a and b, a tensor of quaternions of shape (..., 4).
+    """
+    aw, ax, ay, az = torch.unbind(a, -1)
+    bw, bx, by, bz = torch.unbind(b, -1)
+    ow = aw * bw - ax * bx - ay * by - az * bz
+    ox = aw * bx + ax * bw + ay * bz - az * by
+    oy = aw * by - ax * bz + ay * bw + az * bx
+    oz = aw * bz + ax * by - ay * bx + az * bw
+    return torch.stack((ow, ox, oy, oz), -1)
+
+
+def quaternion_multiply(a, b):
+    """
+    Multiply two quaternions representing rotations, returning the quaternion
+    representing their composition, i.e. the versor with nonnegative real part.
+    Usual torch rules for broadcasting apply.
+    Args:
+        a: Quaternions as tensor of shape (..., 4), real part first.
+        b: Quaternions as tensor of shape (..., 4), real part first.
+    Returns:
+        The product of a and b, a tensor of quaternions of shape (..., 4).
+    """
+    ab = quaternion_raw_multiply(a, b)
+    return standardize_quaternion(ab)
+
+
+def quaternion_invert(quaternion):
+    """
+    Given a quaternion representing rotation, get the quaternion representing
+    its inverse.
+    Args:
+        quaternion: Quaternions as tensor of shape (..., 4), with real part
+            first, which must be versors (unit quaternions).
+    Returns:
+        The inverse, a tensor of quaternions of shape (..., 4).
+    """
+
+    return quaternion * quaternion.new_tensor([1, -1, -1, -1])
+
+
+def quaternion_apply(quaternion, point):
+    """
+    Apply the rotation given by a quaternion to a 3D point.
+    Usual torch rules for broadcasting apply.
+    Args:
+        quaternion: Tensor of quaternions, real part first, of shape (..., 4).
+        point: Tensor of 3D points of shape (..., 3).
+    Returns:
+        Tensor of rotated points of shape (..., 3).
+    """
+    if point.size(-1) != 3:
+        raise ValueError(f"Points are not in 3D, {point.shape}.")
+    real_parts = point.new_zeros(point.shape[:-1] + (1,))
+    point_as_quaternion = torch.cat((real_parts, point), -1)
+    out = quaternion_raw_multiply(
+        quaternion_raw_multiply(quaternion, point_as_quaternion),
+        quaternion_invert(quaternion),
+    )
+    return out[..., 1:]
+
+
+def axis_angle_to_matrix(axis_angle):
+    """
+    Convert rotations given as axis/angle to rotation matrices.
+    Args:
+        axis_angle: Rotations given as a vector in axis angle form,
+            as a tensor of shape (..., 3), where the magnitude is
+            the angle turned anticlockwise in radians around the
+            vector's direction.
+    Returns:
+        Rotation matrices as tensor of shape (..., 3, 3).
+    """
+    return quaternion_to_matrix(axis_angle_to_quaternion(axis_angle))
+
+
+def matrix_to_axis_angle(matrix):
+    """
+    Convert rotations given as rotation matrices to axis/angle.
+ Args: + matrix: Rotation matrices as tensor of shape (..., 3, 3). + Returns: + Rotations given as a vector in axis angle form, as a tensor + of shape (..., 3), where the magnitude is the angle + turned anticlockwise in radians around the vector's + direction. + """ + return quaternion_to_axis_angle(matrix_to_quaternion(matrix)) + + +def axis_angle_to_quaternion(axis_angle): + """ + Convert rotations given as axis/angle to quaternions. + Args: + axis_angle: Rotations given as a vector in axis angle form, + as a tensor of shape (..., 3), where the magnitude is + the angle turned anticlockwise in radians around the + vector's direction. + Returns: + quaternions with real part first, as tensor of shape (..., 4). + """ + angles = torch.norm(axis_angle, p=2, dim=-1, keepdim=True) + half_angles = 0.5 * angles + eps = 1e-6 + small_angles = angles.abs() < eps + sin_half_angles_over_angles = torch.empty_like(angles) + sin_half_angles_over_angles[~small_angles] = ( + torch.sin(half_angles[~small_angles]) / angles[~small_angles] + ) + # for x small, sin(x/2) is about x/2 - (x/2)^3/6 + # so sin(x/2)/x is about 1/2 - (x*x)/48 + sin_half_angles_over_angles[small_angles] = ( + 0.5 - (angles[small_angles] * angles[small_angles]) / 48 + ) + quaternions = torch.cat( + [torch.cos(half_angles), axis_angle * sin_half_angles_over_angles], dim=-1 + ) + return quaternions + + +def quaternion_to_axis_angle(quaternions): + """ + Convert rotations given as quaternions to axis/angle. + Args: + quaternions: quaternions with real part first, + as tensor of shape (..., 4). + Returns: + Rotations given as a vector in axis angle form, as a tensor + of shape (..., 3), where the magnitude is the angle + turned anticlockwise in radians around the vector's + direction. + """ + norms = torch.norm(quaternions[..., 1:], p=2, dim=-1, keepdim=True) + half_angles = torch.atan2(norms, quaternions[..., :1]) + angles = 2 * half_angles + eps = 1e-6 + small_angles = angles.abs() < eps + sin_half_angles_over_angles = torch.empty_like(angles) + sin_half_angles_over_angles[~small_angles] = ( + torch.sin(half_angles[~small_angles]) / angles[~small_angles] + ) + # for x small, sin(x/2) is about x/2 - (x/2)^3/6 + # so sin(x/2)/x is about 1/2 - (x*x)/48 + sin_half_angles_over_angles[small_angles] = ( + 0.5 - (angles[small_angles] * angles[small_angles]) / 48 + ) + return quaternions[..., 1:] / sin_half_angles_over_angles + + +def rotation_6d_to_matrix(d6: torch.Tensor) -> torch.Tensor: + """ + Converts 6D rotation representation by Zhou et al. [1] to rotation matrix + using Gram--Schmidt orthogonalisation per Section B of [1]. + Args: + d6: 6D rotation representation, of size (*, 6) + Returns: + batch of rotation matrices of size (*, 3, 3) + [1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H. + On the Continuity of Rotation Representations in Neural Networks. + IEEE Conference on Computer Vision and Pattern Recognition, 2019. + Retrieved from http://arxiv.org/abs/1812.07035 + """ + + a1, a2 = d6[..., :3], d6[..., 3:] + b1 = F.normalize(a1, dim=-1) + b2 = a2 - (b1 * a2).sum(-1, keepdim=True) * b1 + b2 = F.normalize(b2, dim=-1) + b3 = torch.cross(b1, b2, dim=-1) + return torch.stack((b1, b2, b3), dim=-2) + + +def matrix_to_rotation_6d(matrix: torch.Tensor) -> torch.Tensor: + """ + Converts rotation matrices to 6D rotation representation by Zhou et al. [1] + by dropping the last row. Note that 6D representation is not unique. 
+ Args: + matrix: batch of rotation matrices of size (*, 3, 3) + Returns: + 6D rotation representation, of size (*, 6) + [1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H. + On the Continuity of Rotation Representations in Neural Networks. + IEEE Conference on Computer Vision and Pattern Recognition, 2019. + Retrieved from http://arxiv.org/abs/1812.07035 + """ + return matrix[..., :2, :].clone().reshape(*matrix.size()[:-2], 6) + +def canonicalize_smplh(poses, trans=None): + """ + Canonicalize a batch of SMPL-H pose sequences by rotating every frame + (and, optionally, the translations) into the coordinate frame given by + the first frame's global orientation. + Args: + poses: Rotation matrices of shape (bs, nframes, njoints, 3, 3). + trans: Optional root translations of shape (bs, nframes, 3). + """ + bs, nframes, njoints = poses.shape[:3] + + global_orient = poses[:, :, 0] + + # Global rotation of the first frame + rot2d = matrix_to_axis_angle(global_orient[:, 0]) + # rot2d[:, :2] = 0 # Uncomment to remove only the rotation along the vertical axis + rot2d = axis_angle_to_matrix(rot2d) + + # Rotate all global orientations into the first frame's coordinate system + global_orient = torch.einsum("ikj,imkl->imjl", rot2d, global_orient) + + # Construct the canonicalized version of the poses + xc = torch.cat((global_orient[:, :, None], poses[:, :, 1:]), dim=2) + + if trans is not None: + vel = trans[:, 1:] - trans[:, :-1] + # Turn the translation as well + vel = torch.einsum("ikj,ilk->ilj", rot2d, vel) + trans = torch.cat((torch.zeros(bs, 1, 3, device=vel.device), + torch.cumsum(vel, 1)), 1) + return xc, trans + else: + return xc + + +# Added: build 2D or 3D rotation matrices (about the z-axis in the 3D case) +# from precomputed cosines and sines; `inv=True` yields the inverse rotation. +def matrix_of_angles(cos, sin, inv=False, dim=2): + assert dim in [2, 3] + sin = -sin if inv else sin + if dim == 2: + row1 = torch.stack((cos, -sin), dim=-1) + row2 = torch.stack((sin, cos), dim=-1) + return torch.stack((row1, row2), dim=-2) + elif dim == 3: + row1 = torch.stack((cos, -sin, 0 * cos), dim=-1) + row2 = torch.stack((sin, cos, 0 * cos), dim=-1) + row3 = torch.stack((0 * sin, 0 * cos, 1 + 0 * cos), dim=-1) + return torch.stack((row1, row2, row3), dim=-2) + + \ No newline at end of file diff --git a/utils/smplx/LICENSE b/utils/smplx/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..3034a97b164d6e006655493e950314ec58e200cd --- /dev/null +++ b/utils/smplx/LICENSE @@ -0,0 +1,58 @@ +License + +Software Copyright License for non-commercial scientific research purposes +Please read carefully the following terms and conditions and any accompanying documentation before you download and/or use the SMPL-X/SMPLify-X model, data and software, (the "Model & Software"), including 3D meshes, blend weights, blend shapes, textures, software, scripts, and animations. By downloading and/or using the Model & Software (including downloading, cloning, installing, and any other use of this github repository), you acknowledge that you have read these terms and conditions, understand them, and agree to be bound by them. If you do not agree with these terms and conditions, you must not download and/or use the Model & Software. Any infringement of the terms of this agreement will automatically terminate your rights under this License + +Ownership / Licensees +The Software and the associated materials has been developed at the + +Max Planck Institute for Intelligent Systems (hereinafter "MPI"). + +Any copyright or patent right is owned by and proprietary material of the + +Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (hereinafter “MPG”; MPI and MPG hereinafter collectively “Max-Planck”) + +hereinafter the “Licensor”.
+ +License Grant +Licensor grants you (Licensee) personally a single-user, non-exclusive, non-transferable, free of charge right: + +To install the Model & Software on computers owned, leased or otherwise controlled by you and/or your organization; +To use the Model & Software for the sole purpose of performing non-commercial scientific research, non-commercial education, or non-commercial artistic projects; +Any other use, in particular any use for commercial purposes, is prohibited. This includes, without limitation, incorporation in a commercial product, use in a commercial service, or production of other artifacts for commercial purposes. The Model & Software may not be reproduced, modified and/or made available in any form to any third party without Max-Planck’s prior written permission. + +The Model & Software may not be used for pornographic purposes or to generate pornographic material whether commercial or not. This license also prohibits the use of the Model & Software to train methods/algorithms/neural networks/etc. for commercial use of any kind. By downloading the Model & Software, you agree not to reverse engineer it. + +No Distribution +The Model & Software and the license herein granted shall not be copied, shared, distributed, re-sold, offered for re-sale, transferred or sub-licensed in whole or in part except that you may make one copy for archive purposes only. + +Disclaimer of Representations and Warranties +You expressly acknowledge and agree that the Model & Software results from basic research, is provided “AS IS”, may contain errors, and that any use of the Model & Software is at your sole risk. LICENSOR MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE MODEL & SOFTWARE, NEITHER EXPRESS NOR IMPLIED, AND THE ABSENCE OF ANY LEGAL OR ACTUAL DEFECTS, WHETHER DISCOVERABLE OR NOT. Specifically, and not to limit the foregoing, licensor makes no representations or warranties (i) regarding the merchantability or fitness for a particular purpose of the Model & Software, (ii) that the use of the Model & Software will not infringe any patents, copyrights or other intellectual property rights of a third party, and (iii) that the use of the Model & Software will not cause any damage of any kind to you or a third party. + +Limitation of Liability +Because this Model & Software License Agreement qualifies as a donation, according to Section 521 of the German Civil Code (Bürgerliches Gesetzbuch – BGB) Licensor as a donor is liable for intent and gross negligence only. If the Licensor fraudulently conceals a legal or material defect, they are obliged to compensate the Licensee for the resulting damage. +Licensor shall be liable for loss of data only up to the amount of typical recovery costs which would have arisen had proper and regular data backup measures been taken. For the avoidance of doubt Licensor shall be liable in accordance with the German Product Liability Act in the event of product liability. The foregoing applies also to Licensor’s legal representatives or assistants in performance. Any further liability shall be excluded. +Patent claims generated through the usage of the Model & Software cannot be directed towards the copyright holders. +The Model & Software is provided in the state of development the licensor defines. If modified or extended by Licensee, the Licensor makes no claims about the fitness of the Model & Software and is not responsible for any problems such modifications cause. 
+ +No Maintenance Services +You understand and agree that Licensor is under no obligation to provide either maintenance services, update services, notices of latent defects, or corrections of defects with regard to the Model & Software. Licensor nevertheless reserves the right to update, modify, or discontinue the Model & Software at any time. + +Defects of the Model & Software must be notified in writing to the Licensor with a comprehensible description of the error symptoms. The notification of the defect should enable the reproduction of the error. The Licensee is encouraged to communicate any use, results, modification or publication. + +Publications using the Model & Software +You acknowledge that the Model & Software is a valuable scientific resource and agree to appropriately reference the following paper in any publication making use of the Model & Software. + +Citation: + + +@inproceedings{SMPL-X:2019, + title = {Expressive Body Capture: 3D Hands, Face, and Body from a Single Image}, + author = {Pavlakos, Georgios and Choutas, Vasileios and Ghorbani, Nima and Bolkart, Timo and Osman, Ahmed A. A. and Tzionas, Dimitrios and Black, Michael J.}, + booktitle = {Proceedings IEEE Conf. on Computer Vision and Pattern Recognition (CVPR)}, + year = {2019} +} +Commercial licensing opportunities +For commercial uses of the Software, please send email to ps-license@tue.mpg.de + +This Agreement shall be governed by the laws of the Federal Republic of Germany except for the UN Sales Convention. diff --git a/utils/smplx/README.md b/utils/smplx/README.md new file mode 100644 index 0000000000000000000000000000000000000000..fb2df07aae5c116d23056b53160505316d72ea5f --- /dev/null +++ b/utils/smplx/README.md @@ -0,0 +1,186 @@ +## SMPL-X: A new joint 3D model of the human body, face and hands together + +[[Paper Page](https://smpl-x.is.tue.mpg.de)] [[Paper](https://ps.is.tuebingen.mpg.de/uploads_file/attachment/attachment/497/SMPL-X.pdf)] +[[Supp. Mat.](https://ps.is.tuebingen.mpg.de/uploads_file/attachment/attachment/498/SMPL-X-supp.pdf)] + +![SMPL-X Examples](./images/teaser_fig.png) + +## Table of Contents + * [License](#license) + * [Description](#description) + * [Installation](#installation) + * [Downloading the model](#downloading-the-model) + * [Loading SMPL-X, SMPL+H and SMPL](#loading-smpl-x-smplh-and-smpl) + * [SMPL and SMPL+H setup](#smpl-and-smplh-setup) + * [Model loading](https://github.com/vchoutas/smplx#model-loading) + * [MANO and FLAME correspondences](#mano-and-flame-correspondences) + * [Example](#example) + * [Citation](#citation) + * [Acknowledgments](#acknowledgments) + * [Contact](#contact) + +## License + +Software Copyright License for **non-commercial scientific research purposes**. +Please read carefully the [terms and conditions](https://github.com/vchoutas/smplx/blob/master/LICENSE) and any accompanying documentation before you download and/or use the SMPL-X/SMPLify-X model, data and software, (the "Model & Software"), including 3D meshes, blend weights, blend shapes, textures, software, scripts, and animations. By downloading and/or using the Model & Software (including downloading, cloning, installing, and any other use of this github repository), you acknowledge that you have read these terms and conditions, understand them, and agree to be bound by them. If you do not agree with these terms and conditions, you must not download and/or use the Model & Software. 
Any infringement of the terms of this agreement will automatically terminate your rights under this [License](./LICENSE). + +## Disclaimer + +The original images used for Figures 1 and 2 of the paper can be found in this link. +The images in the paper are used under license from gettyimages.com. +We have acquired the right to use them in the publication, but redistribution is not allowed. +Please follow the instructions on the given link to acquire the right of usage. +Our results are obtained at the 483 × 724 pixel resolution of the original images. + +## Description + +*SMPL-X* (SMPL eXpressive) is a unified body model with shape parameters trained jointly for the +face, hands and body. *SMPL-X* uses standard vertex-based linear blend skinning with learned corrective blend +shapes, has N = 10,475 vertices and K = 54 joints, +which include joints for the neck, jaw, eyeballs and fingers. +SMPL-X is defined by a function M(θ, β, ψ), where θ denotes the pose parameters, β the shape parameters and +ψ the facial expression parameters. + + +## Installation + +To install the model, use one of the following options: +1. To install from PyPI simply run: + ```Shell + pip install smplx[all] + ``` +2. Alternatively, clone this repository and install it using the *setup.py* script: +```Shell +git clone https://github.com/vchoutas/smplx +python setup.py install +``` + +## Downloading the model + +To download the *SMPL-X* model go to [this project website](https://smpl-x.is.tue.mpg.de) and register to get access to the downloads section. + +To download the *SMPL+H* model go to [this project website](http://mano.is.tue.mpg.de) and register to get access to the downloads section. + +To download the *SMPL* model go to [this](http://smpl.is.tue.mpg.de) (male and female models) and [this](http://smplify.is.tue.mpg.de) (gender neutral model) project websites and register to get access to the downloads section. + +## Loading SMPL-X, SMPL+H and SMPL + +### SMPL and SMPL+H setup + +The loader gives the option to use any of the SMPL-X, SMPL+H, SMPL, and MANO models. Depending on the model you want to use, please follow the respective download instructions. To switch between MANO, SMPL, SMPL+H and SMPL-X just change the *model_path* or *model_type* parameters. For more details please check the docs of the model classes. +Before using SMPL and SMPL+H you should follow the instructions in [tools/README.md](./tools/README.md) to remove the +Chumpy objects from both model pkls, as well as merge the MANO parameters with SMPL+H. + +### Model loading + +You can either use the [create](https://github.com/vchoutas/smplx/blob/c63c02b478c5c6f696491ed9167e3af6b08d89b1/smplx/body_models.py#L54) +function from [body_models](./smplx/body_models.py) or directly call the constructor for the +[SMPL](https://github.com/vchoutas/smplx/blob/c63c02b478c5c6f696491ed9167e3af6b08d89b1/smplx/body_models.py#L106), +[SMPL+H](https://github.com/vchoutas/smplx/blob/c63c02b478c5c6f696491ed9167e3af6b08d89b1/smplx/body_models.py#L395) and +[SMPL-X](https://github.com/vchoutas/smplx/blob/c63c02b478c5c6f696491ed9167e3af6b08d89b1/smplx/body_models.py#L628) models.
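 + +For example, a minimal loading sketch (an editorial addition, not from the original README; the `models` path below is a placeholder for your own model directory, laid out as shown next): + +```python +import smplx + +# Hypothetical path: the folder that contains the smpl/smplh/smplx subfolders. +model = smplx.create('models', model_type='smplx', gender='neutral', ext='npz') +output = model(return_verts=True) # default (zero) pose, shape and expression +print(output.vertices.shape) # torch.Size([1, 10475, 3]) for SMPL-X +``` +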
The path to the model can either be the path to the file with the parameters or a directory with the following structure: +```bash +models +├── smpl +│   ├── SMPL_FEMALE.pkl +│   └── SMPL_MALE.pkl +│   └── SMPL_NEUTRAL.pkl +├── smplh +│   ├── SMPLH_FEMALE.pkl +│   └── SMPLH_MALE.pkl +├── mano +| ├── MANO_RIGHT.pkl +| └── MANO_LEFT.pkl +└── smplx + ├── SMPLX_FEMALE.npz + ├── SMPLX_FEMALE.pkl + ├── SMPLX_MALE.npz + ├── SMPLX_MALE.pkl + ├── SMPLX_NEUTRAL.npz + └── SMPLX_NEUTRAL.pkl +``` + + +## MANO and FLAME correspondences + +The vertex correspondences between SMPL-X and MANO, FLAME can be downloaded +from [the project website](https://smpl-x.is.tue.mpg.de). If you have extracted +the correspondence data in the folder *correspondences*, then use the following +scripts to visualize them: + +1. To view MANO correspondences run the following command: + +``` +python examples/vis_mano_vertices.py --model-folder $SMPLX_FOLDER --corr-fname correspondences/MANO_SMPLX_vertex_ids.pkl +``` + +2. To view FLAME correspondences run the following command: + +``` +python examples/vis_flame_vertices.py --model-folder $SMPLX_FOLDER --corr-fname correspondences/SMPL-X__FLAME_vertex_ids.npy +``` + +## Example + +After installing the *smplx* package and downloading the model parameters you should be able to run the *demo.py* +script to visualize the results. For this step you have to install the [pyrender](https://pyrender.readthedocs.io/en/latest/index.html) and [trimesh](https://trimsh.org/) packages. + +`python examples/demo.py --model-folder $SMPLX_FOLDER --plot-joints=True --gender="neutral"` + +![SMPL-X Examples](./images/example.png) + +## Citation + +Depending on which model is loaded for your project, i.e. SMPL-X or SMPL+H or SMPL, please cite the most relevant work below, listed in the same order: + +``` +@inproceedings{SMPL-X:2019, + title = {Expressive Body Capture: 3D Hands, Face, and Body from a Single Image}, + author = {Pavlakos, Georgios and Choutas, Vasileios and Ghorbani, Nima and Bolkart, Timo and Osman, Ahmed A. A. and Tzionas, Dimitrios and Black, Michael J.}, + booktitle = {Proceedings IEEE Conf. on Computer Vision and Pattern Recognition (CVPR)}, + year = {2019} +} +``` + +``` +@article{MANO:SIGGRAPHASIA:2017, + title = {Embodied Hands: Modeling and Capturing Hands and Bodies Together}, + author = {Romero, Javier and Tzionas, Dimitrios and Black, Michael J.}, + journal = {ACM Transactions on Graphics, (Proc. SIGGRAPH Asia)}, + volume = {36}, + number = {6}, + series = {245:1--245:17}, + month = nov, + year = {2017}, + month_numeric = {11} + } +``` + +``` +@article{SMPL:2015, + author = {Loper, Matthew and Mahmood, Naureen and Romero, Javier and Pons-Moll, Gerard and Black, Michael J.}, + title = {{SMPL}: A Skinned Multi-Person Linear Model}, + journal = {ACM Transactions on Graphics, (Proc. SIGGRAPH Asia)}, + month = oct, + number = {6}, + pages = {248:1--248:16}, + publisher = {ACM}, + volume = {34}, + year = {2015} +} +``` + +This repository was originally developed for SMPL-X / SMPLify-X (CVPR 2019), you might be interested in having a look: [https://smpl-x.is.tue.mpg.de](https://smpl-x.is.tue.mpg.de). + +## Acknowledgments + +### Facial Contour + +Special thanks to [Soubhik Sanyal](https://github.com/soubhiksanyal) for sharing the Tensorflow code used for the facial +landmarks. + +## Contact +The code of this repository was implemented by [Vassilis Choutas](vassilis.choutas@tuebingen.mpg.de). + +For questions, please contact [smplx@tue.mpg.de](smplx@tue.mpg.de). 
+ +For commercial licensing (and all related questions for business applications), please contact [ps-licensing@tue.mpg.de](ps-licensing@tue.mpg.de). diff --git a/utils/smplx/examples/demo.py b/utils/smplx/examples/demo.py new file mode 100644 index 0000000000000000000000000000000000000000..7a6fd5024f4ac05d9f5db336b769d84836b51c18 --- /dev/null +++ b/utils/smplx/examples/demo.py @@ -0,0 +1,180 @@ +# -*- coding: utf-8 -*- + +# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is +# holder of all proprietary rights on this computer program. +# You can only use this computer program if you have closed +# a license agreement with MPG or you get the right to use the computer +# program from someone who is authorized to grant you that right. +# Any use of the computer program without a valid license is prohibited and +# liable to prosecution. +# +# Copyright©2019 Max-Planck-Gesellschaft zur Förderung +# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute +# for Intelligent Systems. All rights reserved. +# +# Contact: ps-license@tuebingen.mpg.de + +import os.path as osp +import argparse + +import numpy as np +import torch + +import smplx + + +def main(model_folder, + model_type='smplx', + ext='npz', + gender='neutral', + plot_joints=False, + num_betas=10, + sample_shape=True, + sample_expression=True, + num_expression_coeffs=10, + plotting_module='pyrender', + use_face_contour=False): + + model = smplx.create(model_folder, model_type=model_type, + gender=gender, use_face_contour=use_face_contour, + num_betas=num_betas, + num_expression_coeffs=num_expression_coeffs, + ext=ext) + print(model) + + betas, expression = None, None + if sample_shape: + betas = torch.randn([1, model.num_betas], dtype=torch.float32) + if sample_expression: + expression = torch.randn( + [1, model.num_expression_coeffs], dtype=torch.float32) + + output = model(betas=betas, expression=expression, + return_verts=True) + vertices = output.vertices.detach().cpu().numpy().squeeze() + joints = output.joints.detach().cpu().numpy().squeeze() + + print('Vertices shape =', vertices.shape) + print('Joints shape =', joints.shape) + + if plotting_module == 'pyrender': + import pyrender + import trimesh + vertex_colors = np.ones([vertices.shape[0], 4]) * [0.3, 0.3, 0.3, 0.8] + tri_mesh = trimesh.Trimesh(vertices, model.faces, + vertex_colors=vertex_colors) + + mesh = pyrender.Mesh.from_trimesh(tri_mesh) + + scene = pyrender.Scene() + scene.add(mesh) + + if plot_joints: + sm = trimesh.creation.uv_sphere(radius=0.005) + sm.visual.vertex_colors = [0.9, 0.1, 0.1, 1.0] + tfs = np.tile(np.eye(4), (len(joints), 1, 1)) + tfs[:, :3, 3] = joints + joints_pcl = pyrender.Mesh.from_trimesh(sm, poses=tfs) + scene.add(joints_pcl) + + pyrender.Viewer(scene, use_raymond_lighting=True) + elif plotting_module == 'matplotlib': + from matplotlib import pyplot as plt + from mpl_toolkits.mplot3d import Axes3D + from mpl_toolkits.mplot3d.art3d import Poly3DCollection + + fig = plt.figure() + ax = fig.add_subplot(111, projection='3d') + + mesh = Poly3DCollection(vertices[model.faces], alpha=0.1) + face_color = (1.0, 1.0, 0.9) + edge_color = (0, 0, 0) + mesh.set_edgecolor(edge_color) + mesh.set_facecolor(face_color) + ax.add_collection3d(mesh) + ax.scatter(joints[:, 0], joints[:, 1], joints[:, 2], color='r') + + if plot_joints: + ax.scatter(joints[:, 0], joints[:, 1], joints[:, 2], alpha=0.1) + plt.show() + elif plotting_module == 'open3d': + import open3d as o3d + + mesh = o3d.geometry.TriangleMesh() + mesh.vertices = 
o3d.utility.Vector3dVector( + vertices) + mesh.triangles = o3d.utility.Vector3iVector(model.faces) + mesh.compute_vertex_normals() + mesh.paint_uniform_color([0.3, 0.3, 0.3]) + + geometry = [mesh] + if plot_joints: + joints_pcl = o3d.geometry.PointCloud() + joints_pcl.points = o3d.utility.Vector3dVector(joints) + joints_pcl.paint_uniform_color([0.7, 0.3, 0.3]) + geometry.append(joints_pcl) + + o3d.visualization.draw_geometries(geometry) + else: + raise ValueError('Unknown plotting_module: {}'.format(plotting_module)) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='SMPL-X Demo') + + parser.add_argument('--model-folder', required=True, type=str, + help='The path to the model folder') + parser.add_argument('--model-type', default='smplx', type=str, + choices=['smpl', 'smplh', 'smplx', 'mano', 'flame'], + help='The type of model to load') + parser.add_argument('--gender', type=str, default='neutral', + help='The gender of the model') + parser.add_argument('--num-betas', default=10, type=int, + dest='num_betas', + help='Number of shape coefficients.') + parser.add_argument('--num-expression-coeffs', default=10, type=int, + dest='num_expression_coeffs', + help='Number of expression coefficients.') + parser.add_argument('--plotting-module', type=str, default='pyrender', + dest='plotting_module', + choices=['pyrender', 'matplotlib', 'open3d'], + help='The module to use for plotting the result') + parser.add_argument('--ext', type=str, default='npz', + help='Which extension to use for loading') + parser.add_argument('--plot-joints', default=False, + type=lambda arg: arg.lower() in ['true', '1'], + help='The path to the model folder') + parser.add_argument('--sample-shape', default=True, + dest='sample_shape', + type=lambda arg: arg.lower() in ['true', '1'], + help='Sample a random shape') + parser.add_argument('--sample-expression', default=True, + dest='sample_expression', + type=lambda arg: arg.lower() in ['true', '1'], + help='Sample a random expression') + parser.add_argument('--use-face-contour', default=False, + type=lambda arg: arg.lower() in ['true', '1'], + help='Compute the contour of the face') + + args = parser.parse_args() + + model_folder = osp.expanduser(osp.expandvars(args.model_folder)) + model_type = args.model_type + plot_joints = args.plot_joints + use_face_contour = args.use_face_contour + gender = args.gender + ext = args.ext + plotting_module = args.plotting_module + num_betas = args.num_betas + num_expression_coeffs = args.num_expression_coeffs + sample_shape = args.sample_shape + sample_expression = args.sample_expression + + main(model_folder, model_type, ext=ext, + gender=gender, plot_joints=plot_joints, + num_betas=num_betas, + num_expression_coeffs=num_expression_coeffs, + sample_shape=sample_shape, + sample_expression=sample_expression, + plotting_module=plotting_module, + use_face_contour=use_face_contour) diff --git a/utils/smplx/examples/demo_layers.py b/utils/smplx/examples/demo_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..8d4e18226c02a6c06c5158dc66276598ba96163a --- /dev/null +++ b/utils/smplx/examples/demo_layers.py @@ -0,0 +1,181 @@ +# -*- coding: utf-8 -*- + +# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is +# holder of all proprietary rights on this computer program. +# You can only use this computer program if you have closed +# a license agreement with MPG or you get the right to use the computer +# program from someone who is authorized to grant you that right. 
+# Any use of the computer program without a valid license is prohibited and +# liable to prosecution. +# +# Copyright©2019 Max-Planck-Gesellschaft zur Förderung +# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute +# for Intelligent Systems. All rights reserved. +# +# Contact: ps-license@tuebingen.mpg.de + +import os.path as osp +import argparse + +import numpy as np +import torch + +import smplx + + +def main(model_folder, + model_type='smplx', + ext='npz', + gender='neutral', + plot_joints=False, + num_betas=10, + sample_shape=True, + sample_expression=True, + num_expression_coeffs=10, + plotting_module='pyrender', + use_face_contour=False): + + model = smplx.build_layer( + model_folder, model_type=model_type, + gender=gender, use_face_contour=use_face_contour, + num_betas=num_betas, + num_expression_coeffs=num_expression_coeffs, + ext=ext) + print(model) + + betas, expression = None, None + if sample_shape: + betas = torch.randn([1, model.num_betas], dtype=torch.float32) + if sample_expression: + expression = torch.randn( + [1, model.num_expression_coeffs], dtype=torch.float32) + + output = model(betas=betas, expression=expression, + return_verts=True) + vertices = output.vertices.detach().cpu().numpy().squeeze() + joints = output.joints.detach().cpu().numpy().squeeze() + + print('Vertices shape =', vertices.shape) + print('Joints shape =', joints.shape) + + if plotting_module == 'pyrender': + import pyrender + import trimesh + vertex_colors = np.ones([vertices.shape[0], 4]) * [0.3, 0.3, 0.3, 0.8] + tri_mesh = trimesh.Trimesh(vertices, model.faces, + vertex_colors=vertex_colors) + + mesh = pyrender.Mesh.from_trimesh(tri_mesh) + + scene = pyrender.Scene() + scene.add(mesh) + + if plot_joints: + sm = trimesh.creation.uv_sphere(radius=0.005) + sm.visual.vertex_colors = [0.9, 0.1, 0.1, 1.0] + tfs = np.tile(np.eye(4), (len(joints), 1, 1)) + tfs[:, :3, 3] = joints + joints_pcl = pyrender.Mesh.from_trimesh(sm, poses=tfs) + scene.add(joints_pcl) + + pyrender.Viewer(scene, use_raymond_lighting=True) + elif plotting_module == 'matplotlib': + from matplotlib import pyplot as plt + from mpl_toolkits.mplot3d import Axes3D + from mpl_toolkits.mplot3d.art3d import Poly3DCollection + + fig = plt.figure() + ax = fig.add_subplot(111, projection='3d') + + mesh = Poly3DCollection(vertices[model.faces], alpha=0.1) + face_color = (1.0, 1.0, 0.9) + edge_color = (0, 0, 0) + mesh.set_edgecolor(edge_color) + mesh.set_facecolor(face_color) + ax.add_collection3d(mesh) + ax.scatter(joints[:, 0], joints[:, 1], joints[:, 2], color='r') + + if plot_joints: + ax.scatter(joints[:, 0], joints[:, 1], joints[:, 2], alpha=0.1) + plt.show() + elif plotting_module == 'open3d': + import open3d as o3d + + mesh = o3d.geometry.TriangleMesh() + mesh.vertices = o3d.utility.Vector3dVector( + vertices) + mesh.triangles = o3d.utility.Vector3iVector(model.faces) + mesh.compute_vertex_normals() + mesh.paint_uniform_color([0.3, 0.3, 0.3]) + + geometry = [mesh] + if plot_joints: + joints_pcl = o3d.geometry.PointCloud() + joints_pcl.points = o3d.utility.Vector3dVector(joints) + joints_pcl.paint_uniform_color([0.7, 0.3, 0.3]) + geometry.append(joints_pcl) + + o3d.visualization.draw_geometries(geometry) + else: + raise ValueError('Unknown plotting_module: {}'.format(plotting_module)) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='SMPL-X Demo') + + parser.add_argument('--model-folder', required=True, type=str, + help='The path to the model folder') + 
parser.add_argument('--model-type', default='smplx', type=str, + choices=['smpl', 'smplh', 'smplx', 'mano', 'flame'], + help='The type of model to load') + parser.add_argument('--gender', type=str, default='neutral', + help='The gender of the model') + parser.add_argument('--num-betas', default=10, type=int, + dest='num_betas', + help='Number of shape coefficients.') + parser.add_argument('--num-expression-coeffs', default=10, type=int, + dest='num_expression_coeffs', + help='Number of expression coefficients.') + parser.add_argument('--plotting-module', type=str, default='pyrender', + dest='plotting_module', + choices=['pyrender', 'matplotlib', 'open3d'], + help='The module to use for plotting the result') + parser.add_argument('--ext', type=str, default='npz', + help='Which extension to use for loading') + parser.add_argument('--plot-joints', default=False, + type=lambda arg: arg.lower() in ['true', '1'], + help='The path to the model folder') + parser.add_argument('--sample-shape', default=True, + dest='sample_shape', + type=lambda arg: arg.lower() in ['true', '1'], + help='Sample a random shape') + parser.add_argument('--sample-expression', default=True, + dest='sample_expression', + type=lambda arg: arg.lower() in ['true', '1'], + help='Sample a random expression') + parser.add_argument('--use-face-contour', default=False, + type=lambda arg: arg.lower() in ['true', '1'], + help='Compute the contour of the face') + + args = parser.parse_args() + + model_folder = osp.expanduser(osp.expandvars(args.model_folder)) + model_type = args.model_type + plot_joints = args.plot_joints + use_face_contour = args.use_face_contour + gender = args.gender + ext = args.ext + plotting_module = args.plotting_module + num_betas = args.num_betas + num_expression_coeffs = args.num_expression_coeffs + sample_shape = args.sample_shape + sample_expression = args.sample_expression + + main(model_folder, model_type, ext=ext, + gender=gender, plot_joints=plot_joints, + num_betas=num_betas, + num_expression_coeffs=num_expression_coeffs, + sample_shape=sample_shape, + sample_expression=sample_expression, + plotting_module=plotting_module, + use_face_contour=use_face_contour) diff --git a/utils/smplx/examples/vis_flame_vertices.py b/utils/smplx/examples/vis_flame_vertices.py new file mode 100644 index 0000000000000000000000000000000000000000..b8d6b9b33610876a9d555f87492b326b172692a7 --- /dev/null +++ b/utils/smplx/examples/vis_flame_vertices.py @@ -0,0 +1,92 @@ +# -*- coding: utf-8 -*- + +# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is +# holder of all proprietary rights on this computer program. +# You can only use this computer program if you have closed +# a license agreement with MPG or you get the right to use the computer +# program from someone who is authorized to grant you that right. +# Any use of the computer program without a valid license is prohibited and +# liable to prosecution. +# +# Copyright©2019 Max-Planck-Gesellschaft zur Förderung +# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute +# for Intelligent Systems. All rights reserved. 
+# +# Contact: ps-license@tuebingen.mpg.de + +import os.path as osp +import argparse +import pickle + +import numpy as np +import torch +import open3d as o3d + +import smplx + + +def main(model_folder, corr_fname, ext='npz', + head_color=(0.3, 0.3, 0.6), + gender='neutral'): + + head_idxs = np.load(corr_fname) + + model = smplx.create(model_folder, model_type='smplx', + gender=gender, + ext=ext) + betas = torch.zeros([1, 10], dtype=torch.float32) + expression = torch.zeros([1, 10], dtype=torch.float32) + + output = model(betas=betas, expression=expression, + return_verts=True) + vertices = output.vertices.detach().cpu().numpy().squeeze() + joints = output.joints.detach().cpu().numpy().squeeze() + + print('Vertices shape =', vertices.shape) + print('Joints shape =', joints.shape) + + mesh = o3d.geometry.TriangleMesh() + mesh.vertices = o3d.utility.Vector3dVector(vertices) + mesh.triangles = o3d.utility.Vector3iVector(model.faces) + mesh.compute_vertex_normals() + + colors = np.ones_like(vertices) * [0.3, 0.3, 0.3] + colors[head_idxs] = head_color + + mesh.vertex_colors = o3d.utility.Vector3dVector(colors) + + o3d.visualization.draw_geometries([mesh]) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='SMPL-X Demo') + + parser.add_argument('--model-folder', required=True, type=str, + help='The path to the model folder') + parser.add_argument('--corr-fname', required=True, type=str, + dest='corr_fname', + help='Filename with the head correspondences') + parser.add_argument('--gender', type=str, default='neutral', + help='The gender of the model') + parser.add_argument('--ext', type=str, default='npz', + help='Which extension to use for loading') + parser.add_argument('--head', default='right', + choices=['right', 'left'], + type=str, help='Which head to plot') + parser.add_argument('--head-color', type=float, nargs=3, dest='head_color', + default=(0.3, 0.3, 0.6), + help='Color for the head vertices') + + args = parser.parse_args() + + model_folder = osp.expanduser(osp.expandvars(args.model_folder)) + corr_fname = args.corr_fname + gender = args.gender + ext = args.ext + head = args.head + head_color = args.head_color + + main(model_folder, corr_fname, ext=ext, + head_color=head_color, + gender=gender + ) diff --git a/utils/smplx/examples/vis_mano_vertices.py b/utils/smplx/examples/vis_mano_vertices.py new file mode 100644 index 0000000000000000000000000000000000000000..1741542a1808071cc35fa1fcdef01a869885ec7e --- /dev/null +++ b/utils/smplx/examples/vis_mano_vertices.py @@ -0,0 +1,99 @@ +# -*- coding: utf-8 -*- + +# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is +# holder of all proprietary rights on this computer program. +# You can only use this computer program if you have closed +# a license agreement with MPG or you get the right to use the computer +# program from someone who is authorized to grant you that right. +# Any use of the computer program without a valid license is prohibited and +# liable to prosecution. +# +# Copyright©2019 Max-Planck-Gesellschaft zur Förderung +# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute +# for Intelligent Systems. All rights reserved. 
+# +# Contact: ps-license@tuebingen.mpg.de + +import os.path as osp +import argparse +import pickle + +import numpy as np +import torch +import open3d as o3d + +import smplx + + +def main(model_folder, corr_fname, ext='npz', + hand_color=(0.3, 0.3, 0.6), + gender='neutral', hand='right'): + + with open(corr_fname, 'rb') as f: + idxs_data = pickle.load(f) + if hand == 'both': + hand_idxs = np.concatenate( + [idxs_data['left_hand'], idxs_data['right_hand']] + ) + else: + hand_idxs = idxs_data[f'{hand}_hand'] + + model = smplx.create(model_folder, model_type='smplx', + gender=gender, + ext=ext) + betas = torch.zeros([1, 10], dtype=torch.float32) + expression = torch.zeros([1, 10], dtype=torch.float32) + + output = model(betas=betas, expression=expression, + return_verts=True) + vertices = output.vertices.detach().cpu().numpy().squeeze() + joints = output.joints.detach().cpu().numpy().squeeze() + + print('Vertices shape =', vertices.shape) + print('Joints shape =', joints.shape) + + mesh = o3d.geometry.TriangleMesh() + mesh.vertices = o3d.utility.Vector3dVector(vertices) + mesh.triangles = o3d.utility.Vector3iVector(model.faces) + mesh.compute_vertex_normals() + + colors = np.ones_like(vertices) * [0.3, 0.3, 0.3] + colors[hand_idxs] = hand_color + + mesh.vertex_colors = o3d.utility.Vector3dVector(colors) + + o3d.visualization.draw_geometries([mesh]) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='SMPL-X Demo') + + parser.add_argument('--model-folder', required=True, type=str, + help='The path to the model folder') + parser.add_argument('--corr-fname', required=True, type=str, + dest='corr_fname', + help='Filename with the hand correspondences') + parser.add_argument('--gender', type=str, default='neutral', + help='The gender of the model') + parser.add_argument('--ext', type=str, default='npz', + help='Which extension to use for loading') + parser.add_argument('--hand', default='right', + choices=['right', 'left', 'both'], + type=str, help='Which hand to plot') + parser.add_argument('--hand-color', type=float, nargs=3, dest='hand_color', + default=(0.3, 0.3, 0.6), + help='Color for the hand vertices') + + args = parser.parse_args() + + model_folder = osp.expanduser(osp.expandvars(args.model_folder)) + corr_fname = args.corr_fname + gender = args.gender + ext = args.ext + hand = args.hand + hand_color = args.hand_color + + main(model_folder, corr_fname, ext=ext, + hand_color=hand_color, + gender=gender, hand=hand + ) diff --git a/utils/smplx/setup.py b/utils/smplx/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..0496b2a2ae47157e60c6f1a1b6766404df9c7e16 --- /dev/null +++ b/utils/smplx/setup.py @@ -0,0 +1,79 @@ +# -*- coding: utf-8 -*- + +# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is +# holder of all proprietary rights on this computer program. +# You can only use this computer program if you have closed +# a license agreement with MPG or you get the right to use the computer +# program from someone who is authorized to grant you that right. +# Any use of the computer program without a valid license is prohibited and +# liable to prosecution. +# +# Copyright©2019 Max-Planck-Gesellschaft zur Förderung +# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute +# for Intelligent Systems and the Max Planck Institute for Biological +# Cybernetics. All rights reserved. +# +# Contact: ps-license@tuebingen.mpg.de + +import io +import os + +from setuptools import setup + +# Package meta-data. 
+NAME = 'smplx' +DESCRIPTION = 'PyTorch module for loading the SMPLX body model' +URL = 'http://smpl-x.is.tuebingen.mpg.de' +EMAIL = 'vassilis.choutas@tuebingen.mpg.de' +AUTHOR = 'Vassilis Choutas' +REQUIRES_PYTHON = '>=3.6.0' +VERSION = '0.1.21' + +here = os.path.abspath(os.path.dirname(__file__)) + +try: + FileNotFoundError +except NameError: + FileNotFoundError = IOError + +# Import the README and use it as the long-description. +# Note: this will only work if 'README.md' is present in your MANIFEST.in file! +try: + with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f: + long_description = '\n' + f.read() +except FileNotFoundError: + long_description = DESCRIPTION + +# Load the package's __version__.py module as a dictionary. +about = {} +if not VERSION: + with open(os.path.join(here, NAME, '__version__.py')) as f: + exec(f.read(), about) +else: + about['__version__'] = VERSION + +pyrender_reqs = ['pyrender>=0.1.23', 'trimesh>=2.37.6', 'shapely'] +matplotlib_reqs = ['matplotlib'] +open3d_reqs = ['open3d-python'] + +setup(name=NAME, + version=about['__version__'], + description=DESCRIPTION, + long_description=long_description, + long_description_content_type='text/markdown', + author=AUTHOR, + author_email=EMAIL, + python_requires=REQUIRES_PYTHON, + url=URL, + install_requires=[ + 'numpy>=1.16.2', + 'torch>=1.0.1.post2', + 'torchgeometry>=0.1.2' + ], + extras_require={ + 'pyrender': pyrender_reqs, + 'open3d': open3d_reqs, + 'matplotlib': matplotlib_reqs, + 'all': pyrender_reqs + matplotlib_reqs + open3d_reqs + }, + packages=['smplx', 'tools']) diff --git a/utils/smplx/smplx/__init__.py b/utils/smplx/smplx/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..886949df670691d1ef5995737cafa285224826c4 --- /dev/null +++ b/utils/smplx/smplx/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- + +# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is +# holder of all proprietary rights on this computer program. +# You can only use this computer program if you have closed +# a license agreement with MPG or you get the right to use the computer +# program from someone who is authorized to grant you that right. +# Any use of the computer program without a valid license is prohibited and +# liable to prosecution. +# +# Copyright©2019 Max-Planck-Gesellschaft zur Förderung +# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute +# for Intelligent Systems. All rights reserved. 
+# +# Contact: ps-license@tuebingen.mpg.de + +from .body_models import ( + create, + SMPL, + SMPLH, + SMPLX, + MANO, + FLAME, + build_layer, + SMPLLayer, + SMPLHLayer, + SMPLXLayer, + MANOLayer, + FLAMELayer, +) diff --git a/utils/smplx/smplx/__pycache__/__init__.cpython-310.pyc b/utils/smplx/smplx/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3d574bda63450c916a77e529a7a01b0316bd6db9 Binary files /dev/null and b/utils/smplx/smplx/__pycache__/__init__.cpython-310.pyc differ diff --git a/utils/smplx/smplx/__pycache__/__init__.cpython-36.pyc b/utils/smplx/smplx/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a70d549bad6f947b14cd6b673c51dae318e0c443 Binary files /dev/null and b/utils/smplx/smplx/__pycache__/__init__.cpython-36.pyc differ diff --git a/utils/smplx/smplx/__pycache__/__init__.cpython-37.pyc b/utils/smplx/smplx/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b0cdf4bb207bca2fde755e6d26cb29041361e2a Binary files /dev/null and b/utils/smplx/smplx/__pycache__/__init__.cpython-37.pyc differ diff --git a/utils/smplx/smplx/__pycache__/__init__.cpython-38.pyc b/utils/smplx/smplx/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de9b76dc1b3be61eea4484e047677b9d5cb76912 Binary files /dev/null and b/utils/smplx/smplx/__pycache__/__init__.cpython-38.pyc differ diff --git a/utils/smplx/smplx/__pycache__/__init__.cpython-39.pyc b/utils/smplx/smplx/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb20c0d66ea4bd77ad598944eb65917d68f28829 Binary files /dev/null and b/utils/smplx/smplx/__pycache__/__init__.cpython-39.pyc differ diff --git a/utils/smplx/smplx/__pycache__/body_models.cpython-310.pyc b/utils/smplx/smplx/__pycache__/body_models.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..658b5258777d0b066c7905d0ac6da335ed015e97 Binary files /dev/null and b/utils/smplx/smplx/__pycache__/body_models.cpython-310.pyc differ diff --git a/utils/smplx/smplx/__pycache__/body_models.cpython-36.pyc b/utils/smplx/smplx/__pycache__/body_models.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77f381d28d64e33d3d413a1504844343b513575e Binary files /dev/null and b/utils/smplx/smplx/__pycache__/body_models.cpython-36.pyc differ diff --git a/utils/smplx/smplx/__pycache__/body_models.cpython-37.pyc b/utils/smplx/smplx/__pycache__/body_models.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2602bcfc3e03b8427d9aafad1ff3c21a88e4a6f4 Binary files /dev/null and b/utils/smplx/smplx/__pycache__/body_models.cpython-37.pyc differ diff --git a/utils/smplx/smplx/__pycache__/body_models.cpython-38.pyc b/utils/smplx/smplx/__pycache__/body_models.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8581b4f8ed9e6fba75767ccde5a20dd304412572 Binary files /dev/null and b/utils/smplx/smplx/__pycache__/body_models.cpython-38.pyc differ diff --git a/utils/smplx/smplx/__pycache__/body_models.cpython-39.pyc b/utils/smplx/smplx/__pycache__/body_models.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53883c114f7bf38cec0a9b5d20bffad43f304bc1 Binary files /dev/null and b/utils/smplx/smplx/__pycache__/body_models.cpython-39.pyc differ diff --git 
a/utils/smplx/smplx/__pycache__/lbs.cpython-310.pyc b/utils/smplx/smplx/__pycache__/lbs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac3763aba7179d3a884997e5c19c21d970f426a6 Binary files /dev/null and b/utils/smplx/smplx/__pycache__/lbs.cpython-310.pyc differ diff --git a/utils/smplx/smplx/__pycache__/lbs.cpython-36.pyc b/utils/smplx/smplx/__pycache__/lbs.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d8e2b05fd688ac577569166e18cc0f86ac179b0e Binary files /dev/null and b/utils/smplx/smplx/__pycache__/lbs.cpython-36.pyc differ diff --git a/utils/smplx/smplx/__pycache__/lbs.cpython-37.pyc b/utils/smplx/smplx/__pycache__/lbs.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..74001ab18c1af54df78bb9c68b3eb97b922ea707 Binary files /dev/null and b/utils/smplx/smplx/__pycache__/lbs.cpython-37.pyc differ diff --git a/utils/smplx/smplx/__pycache__/lbs.cpython-38.pyc b/utils/smplx/smplx/__pycache__/lbs.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12c7cd55d716103055285d3e8e744637db847710 Binary files /dev/null and b/utils/smplx/smplx/__pycache__/lbs.cpython-38.pyc differ diff --git a/utils/smplx/smplx/__pycache__/lbs.cpython-39.pyc b/utils/smplx/smplx/__pycache__/lbs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50152df4df2e4c1e177088f8b02bb634ec27c978 Binary files /dev/null and b/utils/smplx/smplx/__pycache__/lbs.cpython-39.pyc differ diff --git a/utils/smplx/smplx/__pycache__/utils.cpython-310.pyc b/utils/smplx/smplx/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b511df577b02ff58b52e24b3f1dea0ad04d5ab30 Binary files /dev/null and b/utils/smplx/smplx/__pycache__/utils.cpython-310.pyc differ diff --git a/utils/smplx/smplx/__pycache__/utils.cpython-36.pyc b/utils/smplx/smplx/__pycache__/utils.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01d1d36239e990369c5ac0162fe0aced4095e7ae Binary files /dev/null and b/utils/smplx/smplx/__pycache__/utils.cpython-36.pyc differ diff --git a/utils/smplx/smplx/__pycache__/utils.cpython-37.pyc b/utils/smplx/smplx/__pycache__/utils.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7fdbc0d5576863be399aae39e0a5dc4256462064 Binary files /dev/null and b/utils/smplx/smplx/__pycache__/utils.cpython-37.pyc differ diff --git a/utils/smplx/smplx/__pycache__/utils.cpython-38.pyc b/utils/smplx/smplx/__pycache__/utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e32632eeb83f9e36170eb227b1e33aaca5f36838 Binary files /dev/null and b/utils/smplx/smplx/__pycache__/utils.cpython-38.pyc differ diff --git a/utils/smplx/smplx/__pycache__/utils.cpython-39.pyc b/utils/smplx/smplx/__pycache__/utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96b43510ba12bb97b61d8400e2f6c804903d7f2a Binary files /dev/null and b/utils/smplx/smplx/__pycache__/utils.cpython-39.pyc differ diff --git a/utils/smplx/smplx/__pycache__/vertex_ids.cpython-310.pyc b/utils/smplx/smplx/__pycache__/vertex_ids.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5009bb837377482f3b0450b014a245038d85091 Binary files /dev/null and b/utils/smplx/smplx/__pycache__/vertex_ids.cpython-310.pyc differ diff --git a/utils/smplx/smplx/__pycache__/vertex_ids.cpython-36.pyc 
b/utils/smplx/smplx/__pycache__/vertex_ids.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d9c9bf8c795aeae636373ac58427e387a5c245e Binary files /dev/null and b/utils/smplx/smplx/__pycache__/vertex_ids.cpython-36.pyc differ diff --git a/utils/smplx/smplx/__pycache__/vertex_ids.cpython-37.pyc b/utils/smplx/smplx/__pycache__/vertex_ids.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae5dd786413349985d875a1a2a00b9fe9995ad3e Binary files /dev/null and b/utils/smplx/smplx/__pycache__/vertex_ids.cpython-37.pyc differ diff --git a/utils/smplx/smplx/__pycache__/vertex_ids.cpython-38.pyc b/utils/smplx/smplx/__pycache__/vertex_ids.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6dd59a0d595c04041f57b767338fb4f17783cd0 Binary files /dev/null and b/utils/smplx/smplx/__pycache__/vertex_ids.cpython-38.pyc differ diff --git a/utils/smplx/smplx/__pycache__/vertex_ids.cpython-39.pyc b/utils/smplx/smplx/__pycache__/vertex_ids.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4635fe78b16c528bd810d72e1e0b6ad132d152fc Binary files /dev/null and b/utils/smplx/smplx/__pycache__/vertex_ids.cpython-39.pyc differ diff --git a/utils/smplx/smplx/__pycache__/vertex_joint_selector.cpython-310.pyc b/utils/smplx/smplx/__pycache__/vertex_joint_selector.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f7d5153f54241fd6d2c0e978c76daa56d0f82b1 Binary files /dev/null and b/utils/smplx/smplx/__pycache__/vertex_joint_selector.cpython-310.pyc differ diff --git a/utils/smplx/smplx/__pycache__/vertex_joint_selector.cpython-36.pyc b/utils/smplx/smplx/__pycache__/vertex_joint_selector.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1dca8f1c68e9a4437213024c7a5af0e28aa40df1 Binary files /dev/null and b/utils/smplx/smplx/__pycache__/vertex_joint_selector.cpython-36.pyc differ diff --git a/utils/smplx/smplx/__pycache__/vertex_joint_selector.cpython-37.pyc b/utils/smplx/smplx/__pycache__/vertex_joint_selector.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4d2d75711b2cd4d958b1e0bbbd7b041e7280abad Binary files /dev/null and b/utils/smplx/smplx/__pycache__/vertex_joint_selector.cpython-37.pyc differ diff --git a/utils/smplx/smplx/__pycache__/vertex_joint_selector.cpython-38.pyc b/utils/smplx/smplx/__pycache__/vertex_joint_selector.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f15d9c59bdb8ddda52cf3ab9fc38be8202b2ff64 Binary files /dev/null and b/utils/smplx/smplx/__pycache__/vertex_joint_selector.cpython-38.pyc differ diff --git a/utils/smplx/smplx/__pycache__/vertex_joint_selector.cpython-39.pyc b/utils/smplx/smplx/__pycache__/vertex_joint_selector.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00c6720cf3394fc17f95278d8ef7b5a7e7b09362 Binary files /dev/null and b/utils/smplx/smplx/__pycache__/vertex_joint_selector.cpython-39.pyc differ diff --git a/utils/smplx/smplx/body_models.py b/utils/smplx/smplx/body_models.py new file mode 100644 index 0000000000000000000000000000000000000000..bc8247613b1556fc516ac6eda07c074885d80d7a --- /dev/null +++ b/utils/smplx/smplx/body_models.py @@ -0,0 +1,2776 @@ +# -*- coding: utf-8 -*- + +# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is +# holder of all proprietary rights on this computer program. 
+# You can only use this computer program if you have closed +# a license agreement with MPG or you get the right to use the computer +# program from someone who is authorized to grant you that right. +# Any use of the computer program without a valid license is prohibited and +# liable to prosecution. +# +# Copyright©2019 Max-Planck-Gesellschaft zur Förderung +# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute +# for Intelligent Systems. All rights reserved. +# +# Contact: ps-license@tuebingen.mpg.de + +from typing import Optional, Dict, Union +import os +import os.path as osp + +import pickle +import sys + + +import numpy as np + +import torch +import torch.nn as nn + +from .lbs import ( + lbs, vertices2landmarks, find_dynamic_lmk_idx_and_bcoords, lbs_v2) + +from .vertex_ids import vertex_ids as VERTEX_IDS +from .utils import ( + Struct, to_np, to_tensor, Tensor, Array, + SMPLOutput, + SMPLHOutput, + SMPLXOutput, + MANOOutput, + FLAMEOutput, + find_joint_kin_chain) +from .vertex_joint_selector import VertexJointSelector +from utils.config_3 import cfg + +class SMPL(nn.Module): + + NUM_JOINTS = 23 + NUM_BODY_JOINTS = 23 + SHAPE_SPACE_DIM = 300 + + def __init__( + self, model_path: str, + data_struct: Optional[Struct] = None, + create_betas: bool = True, + betas: Optional[Tensor] = None, + num_betas: int = 10, + create_global_orient: bool = True, + global_orient: Optional[Tensor] = None, + create_body_pose: bool = True, + body_pose: Optional[Tensor] = None, + create_transl: bool = True, + transl: Optional[Tensor] = None, + dtype=torch.float32, + batch_size: int = 1, + joint_mapper=None, + gender: str = 'neutral', + vertex_ids: Dict[str, int] = None, + v_template: Optional[Union[Tensor, Array]] = None, + **kwargs + ) -> None: + ''' SMPL model constructor + + Parameters + ---------- + model_path: str + The path to the folder or to the file where the model + parameters are stored + data_struct: Strct + A struct object. If given, then the parameters of the model are + read from the object. Otherwise, the model tries to read the + parameters from the given `model_path`. (default = None) + create_global_orient: bool, optional + Flag for creating a member variable for the global orientation + of the body. (default = True) + global_orient: torch.tensor, optional, Bx3 + The default value for the global orientation variable. + (default = None) + create_body_pose: bool, optional + Flag for creating a member variable for the pose of the body. + (default = True) + body_pose: torch.tensor, optional, Bx(Body Joints * 3) + The default value for the body pose variable. + (default = None) + num_betas: int, optional + Number of shape components to use + (default = 10). + create_betas: bool, optional + Flag for creating a member variable for the shape space + (default = True). + betas: torch.tensor, optional, Bx10 + The default value for the shape member variable. + (default = None) + create_transl: bool, optional + Flag for creating a member variable for the translation + of the body. (default = True) + transl: torch.tensor, optional, Bx3 + The default value for the transl variable. + (default = None) + dtype: torch.dtype, optional + The data type for the created variables + batch_size: int, optional + The batch size used for creating the member variables + joint_mapper: object, optional + An object that re-maps the joints. Useful if one wants to + re-order the SMPL joints to some other convention (e.g. 
MSCOCO) + (default = None) + gender: str, optional + Which gender to load + vertex_ids: dict, optional + A dictionary containing the indices of the extra vertices that + will be selected + ''' + + self.gender = gender + + if data_struct is None: + if osp.isdir(model_path): + model_fn = 'SMPL_{}.{ext}'.format(gender.upper(), ext='pkl') + smpl_path = os.path.join(model_path, model_fn) + else: + smpl_path = model_path + assert osp.exists(smpl_path), 'Path {} does not exist!'.format( + smpl_path) + + with open(smpl_path, 'rb') as smpl_file: + data_struct = Struct(**pickle.load(smpl_file, + encoding='latin1')) + + super(SMPL, self).__init__() + self.batch_size = batch_size + shapedirs = data_struct.shapedirs + if (shapedirs.shape[-1] < self.SHAPE_SPACE_DIM): + print(f'WARNING: You are using a {self.name()} model, with only' + ' 10 shape coefficients.') + num_betas = min(num_betas, 10) + else: + num_betas = min(num_betas, self.SHAPE_SPACE_DIM) + + self._num_betas = num_betas + shapedirs = shapedirs[:, :, :num_betas] + # The shape components + self.register_buffer( + 'shapedirs', + to_tensor(to_np(shapedirs), dtype=dtype)) + + if vertex_ids is None: + # SMPL and SMPL-H share the same topology, so any extra joints can + # be drawn from the same place + vertex_ids = VERTEX_IDS['smplh'] + + self.dtype = dtype + + self.joint_mapper = joint_mapper + + self.vertex_joint_selector = VertexJointSelector( + vertex_ids=vertex_ids, **kwargs) + + self.faces = data_struct.f + self.register_buffer('faces_tensor', + to_tensor(to_np(self.faces, dtype=np.int64), + dtype=torch.long)) + + if create_betas: + if betas is None: + default_betas = torch.zeros( + [batch_size, self.num_betas], dtype=dtype) + else: + if torch.is_tensor(betas): + default_betas = betas.clone().detach() + else: + default_betas = torch.tensor(betas, dtype=dtype) + + self.register_parameter( + 'betas', nn.Parameter(default_betas, requires_grad=True)) + + # The tensor that contains the global rotation of the model + # It is separated from the pose of the joints in case we wish to + # optimize only over one of them + if create_global_orient: + if global_orient is None: + default_global_orient = torch.zeros( + [batch_size, 3], dtype=dtype) + else: + if torch.is_tensor(global_orient): + default_global_orient = global_orient.clone().detach() + else: + default_global_orient = torch.tensor( + global_orient, dtype=dtype) + + global_orient = nn.Parameter(default_global_orient, + requires_grad=True) + self.register_parameter('global_orient', global_orient) + + if create_body_pose: + if body_pose is None: + default_body_pose = torch.zeros( + [batch_size, self.NUM_BODY_JOINTS * 3], dtype=dtype) + else: + if torch.is_tensor(body_pose): + default_body_pose = body_pose.clone().detach() + else: + default_body_pose = torch.tensor(body_pose, + dtype=dtype) + self.register_parameter( + 'body_pose', + nn.Parameter(default_body_pose, requires_grad=True)) + + if create_transl: + if transl is None: + default_transl = torch.zeros([batch_size, 3], + dtype=dtype, + requires_grad=True) + else: + default_transl = torch.tensor(transl, dtype=dtype) + self.register_parameter( + 'transl', nn.Parameter(default_transl, requires_grad=True)) + + if v_template is None: + v_template = data_struct.v_template + if not torch.is_tensor(v_template): + v_template = to_tensor(to_np(v_template), dtype=dtype) + # The vertices of the template model + self.register_buffer('v_template', v_template) + + j_regressor = to_tensor(to_np( + data_struct.J_regressor), dtype=dtype) + 
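+ # The joint regressor maps the 6890 template vertices to the 24 SMPL + # joints (a matrix of shape (24, 6890) in the original model data).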
+        self.register_buffer('J_regressor', j_regressor)
+
+        # Pose blend shape basis: 6890 x 3 x 207, reshaped to 6890*3 x 207
+        num_pose_basis = data_struct.posedirs.shape[-1]
+        # 207 x 20670
+        posedirs = np.reshape(data_struct.posedirs, [-1, num_pose_basis]).T
+        self.register_buffer('posedirs',
+                             to_tensor(to_np(posedirs), dtype=dtype))
+
+        # indices of parents for each joint
+        parents = to_tensor(to_np(data_struct.kintree_table[0])).long()
+        parents[0] = -1
+        self.register_buffer('parents', parents)
+
+        self.register_buffer(
+            'lbs_weights', to_tensor(to_np(data_struct.weights), dtype=dtype))
+
+    @property
+    def num_betas(self):
+        return self._num_betas
+
+    @property
+    def num_expression_coeffs(self):
+        return 0
+
+    def create_mean_pose(self, data_struct) -> Tensor:
+        pass
+
+    def name(self) -> str:
+        return 'SMPL'
+
+    @torch.no_grad()
+    def reset_params(self, **params_dict) -> None:
+        for param_name, param in self.named_parameters():
+            if param_name in params_dict:
+                param[:] = torch.tensor(params_dict[param_name])
+            else:
+                param.fill_(0)
+
+    def get_num_verts(self) -> int:
+        return self.v_template.shape[0]
+
+    def get_num_faces(self) -> int:
+        return self.faces.shape[0]
+
+    def extra_repr(self) -> str:
+        msg = [
+            f'Gender: {self.gender.upper()}',
+            f'Number of joints: {self.J_regressor.shape[0]}',
+            f'Betas: {self.num_betas}',
+        ]
+        return '\n'.join(msg)
+
+    def forward(
+        self,
+        betas: Optional[Tensor] = None,
+        body_pose: Optional[Tensor] = None,
+        global_orient: Optional[Tensor] = None,
+        transl: Optional[Tensor] = None,
+        return_verts=True,
+        return_full_pose: bool = False,
+        pose2rot: bool = True,
+        **kwargs
+    ) -> SMPLOutput:
+        ''' Forward pass for the SMPL model
+
+            Parameters
+            ----------
+            global_orient: torch.tensor, optional, shape Bx3
+                If given, ignore the member variable and use it as the global
+                rotation of the body. Useful if someone wishes to predict this
+                with an external model. (default=None)
+            betas: torch.tensor, optional, shape Bx10
+                If given, ignore the member variable `betas` and use it
+                instead. For example, it can be used if shape parameters
+                `betas` are predicted from some external model.
+                (default=None)
+            body_pose: torch.tensor, optional, shape Bx(J*3)
+                If given, ignore the member variable `body_pose` and use it
+                instead. For example, it can be used if the pose of the body
+                joints is predicted from some external model.
+                It should be a tensor that contains joint rotations in
+                axis-angle format. (default=None)
+            transl: torch.tensor, optional, shape Bx3
+                If given, ignore the member variable `transl` and use it
+                instead. For example, it can be used if the translation
+                `transl` is predicted from some external model.
+                (default=None)
+            return_verts: bool, optional
+                Return the vertices.
+                (default=True)
+            return_full_pose: bool, optional
+                Returns the full axis-angle pose vector (default=False)
+
+            Returns
+            -------
+        '''
+        # If no shape and pose parameters are passed along, then use the
+        # ones from the module
+        global_orient = (global_orient if global_orient is not None else
+                         self.global_orient)
+        body_pose = body_pose if body_pose is not None else self.body_pose
+        betas = betas if betas is not None else self.betas
+
+        apply_trans = transl is not None or hasattr(self, 'transl')
+        if transl is None and hasattr(self, 'transl'):
+            transl = self.transl
+
+        full_pose = torch.cat([global_orient, body_pose], dim=1)
+
+        batch_size = max(betas.shape[0], global_orient.shape[0],
+                         body_pose.shape[0])
+
+        if betas.shape[0] != batch_size:
+            num_repeats = int(batch_size / betas.shape[0])
+            betas = betas.expand(num_repeats, -1)
+
+        vertices, joints = lbs(betas, full_pose, self.v_template,
+                               self.shapedirs, self.posedirs,
+                               self.J_regressor, self.parents,
+                               self.lbs_weights, pose2rot=pose2rot)
+
+        joints = self.vertex_joint_selector(vertices, joints)
+        # Map the joints to the current dataset
+        if self.joint_mapper is not None:
+            joints = self.joint_mapper(joints)
+
+        if apply_trans:
+            joints += transl.unsqueeze(dim=1)
+            vertices += transl.unsqueeze(dim=1)
+
+        output = SMPLOutput(vertices=vertices if return_verts else None,
+                            global_orient=global_orient,
+                            body_pose=body_pose,
+                            joints=joints,
+                            betas=betas,
+                            full_pose=full_pose if return_full_pose else None)
+
+        return output
+
+
+class SMPLLayer(SMPL):
+    def __init__(
+        self,
+        *args,
+        **kwargs
+    ) -> None:
+        # Just create a SMPL module without any member variables
+        super(SMPLLayer, self).__init__(
+            create_body_pose=False,
+            create_betas=False,
+            create_global_orient=False,
+            create_transl=False,
+            *args,
+            **kwargs,
+        )
+
+    def forward(
+        self,
+        betas: Optional[Tensor] = None,
+        body_pose: Optional[Tensor] = None,
+        global_orient: Optional[Tensor] = None,
+        transl: Optional[Tensor] = None,
+        return_verts=True,
+        return_full_pose: bool = False,
+        pose2rot: bool = True,
+        **kwargs
+    ) -> SMPLOutput:
+        ''' Forward pass for the SMPL model
+
+            Parameters
+            ----------
+            global_orient: torch.tensor, optional, shape Bx3
+                If given, ignore the member variable and use it as the global
+                rotation of the body. Useful if someone wishes to predict this
+                with an external model. (default=None)
+            betas: torch.tensor, optional, shape Bx10
+                If given, ignore the member variable `betas` and use it
+                instead. For example, it can be used if shape parameters
+                `betas` are predicted from some external model.
+                (default=None)
+            body_pose: torch.tensor, optional, shape Bx(J*3)
+                If given, ignore the member variable `body_pose` and use it
+                instead. For example, it can be used if the pose of the body
+                joints is predicted from some external model.
+                It should be a tensor that contains joint rotations in
+                axis-angle format. (default=None)
+            transl: torch.tensor, optional, shape Bx3
+                If given, ignore the member variable `transl` and use it
+                instead. For example, it can be used if the translation
+                `transl` is predicted from some external model.
+                (default=None)
+            return_verts: bool, optional
+                Return the vertices.
+                (default=True)
+            return_full_pose: bool, optional
+                Returns the full axis-angle pose vector (default=False)
+
+            Returns
+            -------
+        '''
+        device, dtype = self.shapedirs.device, self.shapedirs.dtype
+        if global_orient is None:
+            batch_size = 1
+            global_orient = torch.zeros(3, device=device, dtype=dtype).view(
+                1, 1, 3).expand(batch_size, -1, -1).contiguous()
+        else:
+            batch_size = global_orient.shape[0]
+        if body_pose is None:
+            body_pose = torch.zeros(3, device=device, dtype=dtype).view(
+                1, 1, 3).expand(
+                    batch_size, self.NUM_BODY_JOINTS, -1).contiguous()
+        if betas is None:
+            betas = torch.zeros([batch_size, self.num_betas],
+                                dtype=dtype, device=device)
+        if transl is None:
+            transl = torch.zeros([batch_size, 3], dtype=dtype, device=device)
+        full_pose = torch.cat(
+            [global_orient.reshape(-1, 1, 3),
+             body_pose.reshape(-1, self.NUM_BODY_JOINTS, 3)],
+            dim=1)
+
+        vertices, joints = lbs(betas, full_pose, self.v_template,
+                               self.shapedirs, self.posedirs,
+                               self.J_regressor, self.parents,
+                               self.lbs_weights,
+                               pose2rot=True)
+
+        joints = self.vertex_joint_selector(vertices, joints)
+        # Map the joints to the current dataset
+        if self.joint_mapper is not None:
+            joints = self.joint_mapper(joints)
+
+        if transl is not None:
+            joints += transl.unsqueeze(dim=1)
+            vertices += transl.unsqueeze(dim=1)
+
+        output = SMPLOutput(vertices=vertices if return_verts else None,
+                            global_orient=global_orient,
+                            body_pose=body_pose,
+                            joints=joints,
+                            betas=betas,
+                            full_pose=full_pose if return_full_pose else None)
+
+        return output
+
+
+class SMPLH(SMPL):
+
+    # The hand joints are replaced by MANO
+    NUM_BODY_JOINTS = SMPL.NUM_JOINTS - 2
+    NUM_HAND_JOINTS = 15
+    NUM_JOINTS = NUM_BODY_JOINTS + 2 * NUM_HAND_JOINTS
+
+    def __init__(
+        self, model_path,
+        data_struct: Optional[Struct] = None,
+        create_left_hand_pose: bool = True,
+        left_hand_pose: Optional[Tensor] = None,
+        create_right_hand_pose: bool = True,
+        right_hand_pose: Optional[Tensor] = None,
+        use_pca: bool = True,
+        num_pca_comps: int = 6,
+        flat_hand_mean: bool = False,
+        batch_size: int = 1,
+        gender: str = 'neutral',
+        dtype=torch.float32,
+        vertex_ids=None,
+        use_compressed: bool = True,
+        ext: str = 'pkl',
+        **kwargs
+    ) -> None:
+        ''' SMPLH model constructor
+
+            Parameters
+            ----------
+            model_path: str
+                The path to the folder or to the file where the model
+                parameters are stored
+            data_struct: Struct
+                A struct object. If given, then the parameters of the model are
+                read from the object. Otherwise, the model tries to read the
+                parameters from the given `model_path`. (default = None)
+            create_left_hand_pose: bool, optional
+                Flag for creating a member variable for the pose of the left
+                hand. (default = True)
+            left_hand_pose: torch.tensor, optional, BxP
+                The default value for the left hand pose member variable.
+                (default = None)
+            create_right_hand_pose: bool, optional
+                Flag for creating a member variable for the pose of the right
+                hand. (default = True)
+            right_hand_pose: torch.tensor, optional, BxP
+                The default value for the right hand pose member variable.
+                (default = None)
+            num_pca_comps: int, optional
+                The number of PCA components to use for each hand.
+                (default = 6)
+            flat_hand_mean: bool, optional
+                If False, then the pose of the hand is initialized to the mean
+                hand pose; if True, a flat (open) hand is used.
+ batch_size: int, optional + The batch size used for creating the member variables + gender: str, optional + Which gender to load + dtype: torch.dtype, optional + The data type for the created variables + vertex_ids: dict, optional + A dictionary containing the indices of the extra vertices that + will be selected + ''' + + self.num_pca_comps = num_pca_comps + # If no data structure is passed, then load the data from the given + # model folder + if data_struct is None: + # Load the model + if osp.isdir(model_path): + model_fn = 'SMPLH_{}.{ext}'.format(gender.upper(), ext=ext) + smplh_path = os.path.join(model_path, model_fn) + else: + smplh_path = model_path + assert osp.exists(smplh_path), 'Path {} does not exist!'.format( + smplh_path) + + if ext == 'pkl': + with open(smplh_path, 'rb') as smplh_file: + model_data = pickle.load(smplh_file, encoding='latin1') + elif ext == 'npz': + model_data = np.load(smplh_path, allow_pickle=True) + else: + raise ValueError('Unknown extension: {}'.format(ext)) + data_struct = Struct(**model_data) + + if vertex_ids is None: + vertex_ids = VERTEX_IDS['smplh'] + + super(SMPLH, self).__init__( + model_path=model_path, + data_struct=data_struct, + batch_size=batch_size, vertex_ids=vertex_ids, gender=gender, + use_compressed=use_compressed, dtype=dtype, ext=ext, **kwargs) + + self.use_pca = use_pca + self.num_pca_comps = num_pca_comps + self.flat_hand_mean = flat_hand_mean + + left_hand_components = data_struct.hands_componentsl[:num_pca_comps] + right_hand_components = data_struct.hands_componentsr[:num_pca_comps] + + self.np_left_hand_components = left_hand_components + self.np_right_hand_components = right_hand_components + if self.use_pca: + self.register_buffer( + 'left_hand_components', + torch.tensor(left_hand_components, dtype=dtype)) + self.register_buffer( + 'right_hand_components', + torch.tensor(right_hand_components, dtype=dtype)) + + if self.flat_hand_mean: + left_hand_mean = np.zeros_like(data_struct.hands_meanl) + else: + left_hand_mean = data_struct.hands_meanl + + if self.flat_hand_mean: + right_hand_mean = np.zeros_like(data_struct.hands_meanr) + else: + right_hand_mean = data_struct.hands_meanr + + self.register_buffer('left_hand_mean', + to_tensor(left_hand_mean, dtype=self.dtype)) + self.register_buffer('right_hand_mean', + to_tensor(right_hand_mean, dtype=self.dtype)) + + # Create the buffers for the pose of the left hand + hand_pose_dim = num_pca_comps if use_pca else 3 * self.NUM_HAND_JOINTS + if create_left_hand_pose: + if left_hand_pose is None: + default_lhand_pose = torch.zeros([batch_size, hand_pose_dim], + dtype=dtype) + else: + default_lhand_pose = torch.tensor(left_hand_pose, dtype=dtype) + + left_hand_pose_param = nn.Parameter(default_lhand_pose, + requires_grad=True) + self.register_parameter('left_hand_pose', + left_hand_pose_param) + + if create_right_hand_pose: + if right_hand_pose is None: + default_rhand_pose = torch.zeros([batch_size, hand_pose_dim], + dtype=dtype) + else: + default_rhand_pose = torch.tensor(right_hand_pose, dtype=dtype) + + right_hand_pose_param = nn.Parameter(default_rhand_pose, + requires_grad=True) + self.register_parameter('right_hand_pose', + right_hand_pose_param) + + # Create the buffer for the mean pose. 
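+        # create_mean_pose (below) concatenates zero means for the global
+        # orientation and body pose with the (possibly flattened) hand means,
+        # so pose_mean only offsets the hand joints.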
+        pose_mean_tensor = self.create_mean_pose(
+            data_struct, flat_hand_mean=flat_hand_mean)
+        if not torch.is_tensor(pose_mean_tensor):
+            pose_mean_tensor = torch.tensor(pose_mean_tensor, dtype=dtype)
+        self.register_buffer('pose_mean', pose_mean_tensor)
+
+    def create_mean_pose(self, data_struct, flat_hand_mean=False):
+        # Create the array for the mean pose. If flat_hand is false, then use
+        # the mean that is given by the data, rather than the flat open hand
+        global_orient_mean = torch.zeros([3], dtype=self.dtype)
+        body_pose_mean = torch.zeros([self.NUM_BODY_JOINTS * 3],
+                                     dtype=self.dtype)
+
+        pose_mean = torch.cat([global_orient_mean, body_pose_mean,
+                               self.left_hand_mean,
+                               self.right_hand_mean], dim=0)
+        return pose_mean
+
+    def name(self) -> str:
+        return 'SMPL+H'
+
+    def extra_repr(self):
+        msg = super(SMPLH, self).extra_repr()
+        msg = [msg]
+        if self.use_pca:
+            msg.append(f'Number of PCA components: {self.num_pca_comps}')
+        msg.append(f'Flat hand mean: {self.flat_hand_mean}')
+        return '\n'.join(msg)
+
+    def forward(
+        self,
+        betas: Optional[Tensor] = None,
+        global_orient: Optional[Tensor] = None,
+        body_pose: Optional[Tensor] = None,
+        left_hand_pose: Optional[Tensor] = None,
+        right_hand_pose: Optional[Tensor] = None,
+        transl: Optional[Tensor] = None,
+        return_verts: bool = True,
+        return_full_pose: bool = False,
+        pose2rot: bool = True,
+        **kwargs
+    ) -> SMPLHOutput:
+        '''
+        '''
+        # If no shape and pose parameters are passed along, then use the
+        # ones from the module
+        global_orient = (global_orient if global_orient is not None else
+                         self.global_orient)
+        body_pose = body_pose if body_pose is not None else self.body_pose
+        betas = betas if betas is not None else self.betas
+        left_hand_pose = (left_hand_pose if left_hand_pose is not None else
+                          self.left_hand_pose)
+        right_hand_pose = (right_hand_pose if right_hand_pose is not None else
+                           self.right_hand_pose)
+
+        apply_trans = transl is not None or hasattr(self, 'transl')
+        if transl is None:
+            if hasattr(self, 'transl'):
+                transl = self.transl
+
+        if self.use_pca:
+            left_hand_pose = torch.einsum(
+                'bi,ij->bj', [left_hand_pose, self.left_hand_components])
+            right_hand_pose = torch.einsum(
+                'bi,ij->bj', [right_hand_pose, self.right_hand_components])
+
+        full_pose = torch.cat([global_orient, body_pose,
+                               left_hand_pose,
+                               right_hand_pose], dim=1)
+        full_pose += self.pose_mean
+
+        vertices, joints = lbs(betas, full_pose, self.v_template,
+                               self.shapedirs, self.posedirs,
+                               self.J_regressor, self.parents,
+                               self.lbs_weights, pose2rot=pose2rot)
+
+        # Add any extra joints that might be needed
+        joints = self.vertex_joint_selector(vertices, joints)
+        if self.joint_mapper is not None:
+            joints = self.joint_mapper(joints)
+
+        if apply_trans:
+            joints += transl.unsqueeze(dim=1)
+            vertices += transl.unsqueeze(dim=1)
+
+        output = SMPLHOutput(vertices=vertices if return_verts else None,
+                             joints=joints,
+                             betas=betas,
+                             global_orient=global_orient,
+                             body_pose=body_pose,
+                             left_hand_pose=left_hand_pose,
+                             right_hand_pose=right_hand_pose,
+                             full_pose=full_pose if return_full_pose else None)
+
+        return output
+
+
+class SMPLHLayer(SMPLH):
+
+    def __init__(
+        self, *args, **kwargs
+    ) -> None:
+        ''' SMPL+H as a layer model constructor
+        '''
+        super(SMPLHLayer, self).__init__(
+            create_global_orient=False,
+            create_body_pose=False,
+            create_left_hand_pose=False,
+            create_right_hand_pose=False,
+            create_betas=False,
+            create_transl=False,
+            *args,
+            **kwargs)
+
+    def forward(
+        self,
+        betas: Optional[Tensor] = None,
+        global_orient: Optional[Tensor] = None,
+        body_pose: Optional[Tensor] = None,
+        left_hand_pose: Optional[Tensor] = None,
+        right_hand_pose: Optional[Tensor] = None,
+        transl: Optional[Tensor] = None,
+        return_verts: bool = True,
+        return_full_pose: bool = False,
+        pose2rot: bool = True,
+        **kwargs
+    ) -> SMPLHOutput:
+        '''
+        '''
+        device, dtype = self.shapedirs.device, self.shapedirs.dtype
+        if global_orient is None:
+            batch_size = 1
+            global_orient = torch.zeros(3, device=device, dtype=dtype).view(
+                1, 1, 3).expand(batch_size, -1, -1).contiguous()
+        else:
+            batch_size = global_orient.shape[0]
+        if body_pose is None:
+            body_pose = torch.zeros(3, device=device, dtype=dtype).view(
+                1, 1, 3).expand(batch_size, 21, -1).contiguous()
+        if left_hand_pose is None:
+            left_hand_pose = torch.zeros(3, device=device, dtype=dtype).view(
+                1, 1, 3).expand(batch_size, 15, -1).contiguous()
+        if right_hand_pose is None:
+            right_hand_pose = torch.zeros(3, device=device, dtype=dtype).view(
+                1, 1, 3).expand(batch_size, 15, -1).contiguous()
+        if betas is None:
+            betas = torch.zeros([batch_size, self.num_betas],
+                                dtype=dtype, device=device)
+        if transl is None:
+            transl = torch.zeros([batch_size, 3], dtype=dtype, device=device)
+
+        # Concatenate all pose vectors
+        full_pose = torch.cat(
+            [global_orient.reshape(-1, 1, 3),
+             body_pose.reshape(-1, self.NUM_BODY_JOINTS, 3),
+             left_hand_pose.reshape(-1, self.NUM_HAND_JOINTS, 3),
+             right_hand_pose.reshape(-1, self.NUM_HAND_JOINTS, 3)],
+            dim=1)
+
+        vertices, joints = lbs(betas, full_pose, self.v_template,
+                               self.shapedirs, self.posedirs,
+                               self.J_regressor, self.parents,
+                               self.lbs_weights, pose2rot=True)
+
+        # Add any extra joints that might be needed
+        joints = self.vertex_joint_selector(vertices, joints)
+        if self.joint_mapper is not None:
+            joints = self.joint_mapper(joints)
+
+        if transl is not None:
+            joints += transl.unsqueeze(dim=1)
+            vertices += transl.unsqueeze(dim=1)
+
+        output = SMPLHOutput(vertices=vertices if return_verts else None,
+                             joints=joints,
+                             betas=betas,
+                             global_orient=global_orient,
+                             body_pose=body_pose,
+                             left_hand_pose=left_hand_pose,
+                             right_hand_pose=right_hand_pose,
+                             full_pose=full_pose if return_full_pose else None)
+
+        return output
+
+
+class SMPLX(SMPLH):
+    '''
+    SMPL-X (SMPL eXpressive) is a unified body model, with shape parameters
+    trained jointly for the face, hands and body.
+    SMPL-X uses standard vertex based linear blend skinning with learned
+    corrective blend shapes, has N=10475 vertices and K=54 joints,
+    which includes joints for the neck, jaw, eyeballs and fingers.
+    Compared with the original SMPL-X model, this version additionally
+    carries a face shape parameter (size=100) and an expression parameter
+    (size=50).
+    '''
+
+    NUM_BODY_JOINTS = SMPLH.NUM_BODY_JOINTS
+    NUM_HAND_JOINTS = 15
+    NUM_FACE_JOINTS = 3
+    NUM_JOINTS = NUM_BODY_JOINTS + 2 * NUM_HAND_JOINTS + NUM_FACE_JOINTS
+    EXPRESSION_SPACE_DIM = 100
+    NECK_IDX = 12
+
+    def __init__(
+        self, model_path: str,
+        num_expression_coeffs: int = 10,
+        create_expression: bool = True,
+        expression: Optional[Tensor] = None,
+        create_jaw_pose: bool = True,
+        jaw_pose: Optional[Tensor] = None,
+        create_leye_pose: bool = True,
+        leye_pose: Optional[Tensor] = None,
+        create_reye_pose=True,
+        reye_pose: Optional[Tensor] = None,
+        use_face_contour: bool = False,
+        batch_size: int = 1,
+        gender: str = 'neutral',
+        dtype=torch.float32,
+        ext: str = 'npz',
+        **kwargs
+    ) -> None:
+        ''' SMPLX model constructor
+
+            Parameters
+            ----------
+            model_path: str
+                The path to the folder or to the file where the model
+                parameters are stored
+            num_expression_coeffs: int, optional
+                Number of expression components to use
+                (default = 10).
+            create_expression: bool, optional
+                Flag for creating a member variable for the expression space
+                (default = True).
+            expression: torch.tensor, optional, Bx10
+                The default value for the expression member variable.
+                (default = None)
+            create_jaw_pose: bool, optional
+                Flag for creating a member variable for the jaw pose.
+                (default = True)
+            jaw_pose: torch.tensor, optional, Bx3
+                The default value for the jaw pose variable.
+                (default = None)
+            create_leye_pose: bool, optional
+                Flag for creating a member variable for the left eye pose.
+                (default = True)
+            leye_pose: torch.tensor, optional, Bx10
+                The default value for the left eye pose variable.
+                (default = None)
+            create_reye_pose: bool, optional
+                Flag for creating a member variable for the right eye pose.
+                (default = True)
+            reye_pose: torch.tensor, optional, Bx10
+                The default value for the right eye pose variable.
+                (default = None)
+            use_face_contour: bool, optional
+                Whether to compute the keypoints that form the facial contour
+            batch_size: int, optional
+                The batch size used for creating the member variables
+            gender: str, optional
+                Which gender to load
+            dtype: torch.dtype
+                The data type for the created variables
+        '''
+
+        # Load the model
+        if cfg.use_flame and not cfg.modify_root_joint:
+            smplx_path = './body_models/human_model_files/smplx/SMPLX_NEUTRAL_WiFlame.npy'
+            model_data = np.load(smplx_path, allow_pickle=True).item()
+        elif cfg.use_flame and cfg.modify_root_joint:
+            smplx_path = './body_models/human_model_files/smplx/SMPLX_NEUTRAL_NEW_WiFlame.npy'
+            model_data = np.load(smplx_path, allow_pickle=True).item()
+        elif not cfg.use_flame and cfg.modify_root_joint:
+            smplx_path = './body_models/human_model_files/smplx/SMPLX_NEUTRAL_NEW.npy'
+            model_data = np.load(smplx_path, allow_pickle=True).item()
+        else:
+            if osp.isdir(model_path):
+                model_fn = 'SMPLX_{}.{ext}'.format(gender.upper(), ext=ext)
+                smplx_path = os.path.join(model_path, model_fn)
+            else:
+                smplx_path = model_path
+            assert osp.exists(smplx_path), 'Path {} does not exist!'.format(
+                smplx_path)
+
+            if ext == 'pkl':
+                with open(smplx_path, 'rb') as smplx_file:
+                    model_data = pickle.load(smplx_file, encoding='latin1')
+            elif ext == 'npz':
+                model_data = np.load(smplx_path, allow_pickle=True)
+            else:
+                raise ValueError('Unknown extension: {}'.format(ext))
+
+        data_struct = Struct(**model_data)
+
+        super(SMPLX, self).__init__(
+            model_path=model_path,
+            data_struct=data_struct,
+            dtype=dtype,
+            batch_size=batch_size,
+            vertex_ids=VERTEX_IDS['smplx'],
+            gender=gender, ext=ext,
+            **kwargs)
+
+        lmk_faces_idx = data_struct.lmk_faces_idx
+        self.register_buffer('lmk_faces_idx',
+                             torch.tensor(lmk_faces_idx, dtype=torch.long))
+        lmk_bary_coords = data_struct.lmk_bary_coords
+        self.register_buffer('lmk_bary_coords',
+                             torch.tensor(lmk_bary_coords, dtype=dtype))
+
+        self.use_face_contour = use_face_contour
+        if self.use_face_contour:
+            dynamic_lmk_faces_idx = data_struct.dynamic_lmk_faces_idx
+            dynamic_lmk_faces_idx = torch.tensor(
+                dynamic_lmk_faces_idx,
+                dtype=torch.long)
+            self.register_buffer('dynamic_lmk_faces_idx',
+                                 dynamic_lmk_faces_idx)
+
+            dynamic_lmk_bary_coords = data_struct.dynamic_lmk_bary_coords
+            dynamic_lmk_bary_coords = torch.tensor(
+                dynamic_lmk_bary_coords, dtype=dtype)
+            self.register_buffer('dynamic_lmk_bary_coords',
+                                 dynamic_lmk_bary_coords)
+
+            neck_kin_chain = find_joint_kin_chain(self.NECK_IDX, self.parents)
+            self.register_buffer(
+                'neck_kin_chain',
+                torch.tensor(neck_kin_chain, dtype=torch.long))
+
+        if create_jaw_pose:
+            if jaw_pose is None:
+                default_jaw_pose = torch.zeros([batch_size, 3], dtype=dtype)
+            else:
+                default_jaw_pose = torch.tensor(jaw_pose, dtype=dtype)
+            jaw_pose_param = nn.Parameter(default_jaw_pose,
+                                          requires_grad=True)
+            self.register_parameter('jaw_pose', jaw_pose_param)
+
+        if create_leye_pose:
+            if leye_pose is None:
+                default_leye_pose = torch.zeros([batch_size, 3], dtype=dtype)
+            else:
+                default_leye_pose = torch.tensor(leye_pose, dtype=dtype)
+            leye_pose_param = nn.Parameter(default_leye_pose,
+                                           requires_grad=True)
+            self.register_parameter('leye_pose', leye_pose_param)
+
+        if create_reye_pose:
+            if reye_pose is None:
+                default_reye_pose = torch.zeros([batch_size, 3], dtype=dtype)
+            else:
+                default_reye_pose = torch.tensor(reye_pose, dtype=dtype)
+            reye_pose_param = nn.Parameter(default_reye_pose,
+                                           requires_grad=True)
+            self.register_parameter('reye_pose', reye_pose_param)
+
+        shapedirs = data_struct.shapedirs
+        if len(shapedirs.shape) < 3:
+            shapedirs = shapedirs[:, :, None]
+        if (shapedirs.shape[-1] < self.SHAPE_SPACE_DIM +
+                self.EXPRESSION_SPACE_DIM):
+            print(f'WARNING: You are using a {self.name()} model, with only'
+                  ' 10 shape and 10 expression coefficients.')
+            expr_start_idx = 10
+            expr_end_idx = 20
+            num_expression_coeffs = min(num_expression_coeffs, 10)
+        else:
+            expr_start_idx = self.SHAPE_SPACE_DIM
+            expr_end_idx = self.SHAPE_SPACE_DIM + num_expression_coeffs
+            num_expression_coeffs = min(
+                num_expression_coeffs, self.EXPRESSION_SPACE_DIM)
+
+        self._num_expression_coeffs = num_expression_coeffs
+
+        expr_dirs = shapedirs[:, :, expr_start_idx:expr_end_idx]
+        self.register_buffer(
+            'expr_dirs', to_tensor(to_np(expr_dirs), dtype=dtype))
+        if cfg.use_flame:
+            print('Use FLAME shape and expression parameters.')
+            flame_shapedirs = data_struct.flame_shapedirs
+            self.register_buffer(
+                'flame_shapedirs',
+                to_tensor(to_np(flame_shapedirs), dtype=dtype))
+
+            # Fix the remaining shape betas.
+            # There are 300 shape parameters in total to control FLAME, but
+            # one can use just the first few to express the shape; for
+            # example, the RingNet project uses 100 shape parameters.
+            flame_default_shape = torch.zeros(
+                [1, 300 - cfg.flame_shape_params],
+                dtype=self.dtype, requires_grad=False)
+            self.register_parameter(
+                'flame_shape_betas',
+                nn.Parameter(flame_default_shape, requires_grad=False))
+
+            # Fix the remaining expression betas.
+            # There are 100 expression parameters in total to control FLAME,
+            # but one can use just the first few to express the expression;
+            # for example, the RingNet project uses 50 expression parameters.
+            default_exp = torch.zeros(
+                [1, 100 - cfg.flame_expression_params],
+                dtype=self.dtype, requires_grad=False)
+            self.register_parameter(
+                'flame_expression_betas',
+                nn.Parameter(default_exp, requires_grad=False))
+            self.head_idxs = np.load(cfg.face_corr_fname)
+
+        if create_expression:
+            if expression is None:
+                default_expression = torch.zeros(
+                    [batch_size, self.num_expression_coeffs], dtype=dtype)
+            else:
+                default_expression = torch.tensor(expression, dtype=dtype)
+            expression_param = nn.Parameter(default_expression,
+                                            requires_grad=True)
+            self.register_parameter('expression', expression_param)
+
+    def name(self) -> str:
+        return 'SMPL-X-v2'
+
+    @property
+    def num_expression_coeffs(self):
+        return self._num_expression_coeffs
+
+    def create_mean_pose(self, data_struct, flat_hand_mean=False):
+        # Create the array for the mean pose.
+        # If flat_hand is false, then use the mean that is given by the data,
+        # rather than the flat open hand
+        global_orient_mean = torch.zeros([3], dtype=self.dtype)
+        body_pose_mean = torch.zeros([self.NUM_BODY_JOINTS * 3],
+                                     dtype=self.dtype)
+        jaw_pose_mean = torch.zeros([3], dtype=self.dtype)
+        leye_pose_mean = torch.zeros([3], dtype=self.dtype)
+        reye_pose_mean = torch.zeros([3], dtype=self.dtype)
+
+        pose_mean = np.concatenate([global_orient_mean, body_pose_mean,
+                                    jaw_pose_mean,
+                                    leye_pose_mean, reye_pose_mean,
+                                    self.left_hand_mean,
+                                    self.right_hand_mean],
+                                   axis=0)
+
+        return pose_mean
+
+    def extra_repr(self):
+        msg = super(SMPLX, self).extra_repr()
+        msg = [
+            msg,
+            f'Number of Expression Coefficients: {self.num_expression_coeffs}'
+        ]
+        return '\n'.join(msg)
+
+    def forward(
+        self,
+        betas: Optional[Tensor] = None,
+        global_orient: Optional[Tensor] = None,
+        body_pose: Optional[Tensor] = None,
+        left_hand_pose: Optional[Tensor] = None,
+        right_hand_pose: Optional[Tensor] = None,
+        transl: Optional[Tensor] = None,
+        expression: Optional[Tensor] = None,
+        flame_betas: Optional[Tensor] = None,
+        flame_expression: Optional[Tensor] = None,
+        jaw_pose: Optional[Tensor] = None,
+        leye_pose: Optional[Tensor] = None,
+        reye_pose: Optional[Tensor] = None,
+        return_verts: bool = True,
+        return_full_pose: bool = False,
+        pose2rot: bool = True,
+        **kwargs
+    ) -> SMPLXOutput:
+        '''
+        Forward pass for the SMPLX model
+
+        Parameters
+        ----------
+        global_orient: torch.tensor, optional, shape Bx3
+            If given, ignore the member variable and use it as the global
+            rotation of the body. Useful if someone wishes to predict this
+            with an external model. (default=None)
+        betas: torch.tensor, optional, shape Bx10
+            If given, ignore the member variable `betas` and use it
+            instead. For example, it can be used if shape parameters
+            `betas` are predicted from some external model.
+            (default=None)
+        expression: torch.tensor, optional, shape Bx10
+            If given, ignore the member variable `expression` and use it
+            instead. For example, it can be used if expression parameters
+            `expression` are predicted from some external model.
+        body_pose: torch.tensor, optional, shape Bx(J*3)
+            If given, ignore the member variable `body_pose` and use it
+            instead. For example, it can be used if the pose of the body
+            joints is predicted from some external model.
+            It should be a tensor that contains joint rotations in
+            axis-angle format. (default=None)
+        left_hand_pose: torch.tensor, optional, shape BxP
+            If given, ignore the member variable `left_hand_pose` and
+            use this instead. It should either contain PCA coefficients or
+            joint rotations in axis-angle format.
+        right_hand_pose: torch.tensor, optional, shape BxP
+            If given, ignore the member variable `right_hand_pose` and
+            use this instead. It should either contain PCA coefficients or
+            joint rotations in axis-angle format.
+        jaw_pose: torch.tensor, optional, shape Bx3
+            If given, ignore the member variable `jaw_pose` and
+            use this instead. It should contain joint rotations in
+            axis-angle format.
+        transl: torch.tensor, optional, shape Bx3
+            If given, ignore the member variable `transl` and use it
+            instead. For example, it can be used if the translation
+            `transl` is predicted from some external model.
+            (default=None)
+        return_verts: bool, optional
+            Return the vertices.
(default=True) + return_full_pose: bool, optional + Returns the full axis-angle pose vector (default=False) + + Returns + ------- + output: ModelOutput + A named tuple of type `ModelOutput` + ''' + + # If no shape and pose parameters are passed along, then use the + # ones from the module + global_orient = (global_orient if global_orient is not None else + self.global_orient) + body_pose = body_pose if body_pose is not None else self.body_pose + betas = betas if betas is not None else self.betas + + left_hand_pose = (left_hand_pose if left_hand_pose is not None else + self.left_hand_pose) + right_hand_pose = (right_hand_pose if right_hand_pose is not None else + self.right_hand_pose) + jaw_pose = jaw_pose if jaw_pose is not None else self.jaw_pose + leye_pose = leye_pose if leye_pose is not None else self.leye_pose + reye_pose = reye_pose if reye_pose is not None else self.reye_pose + expression = expression if expression is not None else self.expression + + apply_trans = transl is not None or hasattr(self, 'transl') + if transl is None: + if hasattr(self, 'transl'): + transl = self.transl + + if self.use_pca: + left_hand_pose = torch.einsum( + 'bi,ij->bj', [left_hand_pose, self.left_hand_components]) + right_hand_pose = torch.einsum( + 'bi,ij->bj', [right_hand_pose, self.right_hand_components]) + + full_pose = torch.cat([global_orient, body_pose, + jaw_pose, leye_pose, reye_pose, + left_hand_pose, + right_hand_pose], dim=1) + + # Add the mean pose of the model. Does not affect the body, only the + # hands when flat_hand_mean == False + # print(full_pose.shape, self.pose_mean.shape) + full_pose += self.pose_mean + + batch_size = max(betas.shape[0], global_orient.shape[0], + body_pose.shape[0]) + # Concatenate the shape and expression coefficients + scale = int(batch_size / betas.shape[0]) + if scale > 1: + betas = betas.expand(scale, -1) + shape_components = torch.cat([betas, expression], dim=-1) + + shapedirs = torch.cat([self.shapedirs, self.expr_dirs], dim=-1) + + if cfg.use_flame: + flame_shape_betas = self.flame_shape_betas.expand(batch_size, -1) + flame_expression_betas = self.flame_expression_betas.expand(batch_size, -1) + flame_betas = torch.cat([flame_betas, flame_shape_betas, flame_expression, flame_expression_betas], dim=1) + flame_shapedirs = self.flame_shapedirs + vertices, joints = lbs_v2(shape_components, full_pose, self.v_template, + shapedirs, self.posedirs, self.J_regressor, self.parents, + self.lbs_weights, flame_betas, flame_shapedirs, self.head_idxs, pose2rot=pose2rot, + ) + else: + vertices, joints = lbs(shape_components, full_pose, self.v_template, + shapedirs, self.posedirs, + self.J_regressor, self.parents, + self.lbs_weights, pose2rot=pose2rot, + ) + # print(joints.shape) + # print(self.J_regressor.shape) + lmk_faces_idx = self.lmk_faces_idx.unsqueeze( + dim=0).expand(batch_size, -1).contiguous() + lmk_bary_coords = self.lmk_bary_coords.unsqueeze(dim=0).repeat( + self.batch_size, 1, 1) + if self.use_face_contour: + lmk_idx_and_bcoords = find_dynamic_lmk_idx_and_bcoords( + vertices, full_pose, self.dynamic_lmk_faces_idx, + self.dynamic_lmk_bary_coords, + self.neck_kin_chain, + pose2rot=True, + ) + dyn_lmk_faces_idx, dyn_lmk_bary_coords = lmk_idx_and_bcoords + + lmk_faces_idx = torch.cat([lmk_faces_idx, + dyn_lmk_faces_idx], 1) + lmk_bary_coords = torch.cat( + [lmk_bary_coords.expand(batch_size, -1, -1), + dyn_lmk_bary_coords], 1) + + landmarks = vertices2landmarks(vertices, self.faces_tensor, + lmk_faces_idx, + lmk_bary_coords) + + # Add any extra joints that might 
be needed
+        joints = self.vertex_joint_selector(vertices, joints)
+        # Add the landmarks to the joints
+        joints = torch.cat([joints, landmarks], dim=1)
+        # Map the joints to the current dataset
+
+        if self.joint_mapper is not None:
+            joints = self.joint_mapper(joints=joints, vertices=vertices)
+
+        if apply_trans:
+            joints += transl.unsqueeze(dim=1)
+            vertices += transl.unsqueeze(dim=1)
+
+        output = SMPLXOutput(vertices=vertices if return_verts else None,
+                             joints=joints,
+                             betas=betas,
+                             expression=expression,
+                             global_orient=global_orient,
+                             body_pose=body_pose,
+                             left_hand_pose=left_hand_pose,
+                             right_hand_pose=right_hand_pose,
+                             jaw_pose=jaw_pose,
+                             faces=self.faces,
+                             full_pose=full_pose if return_full_pose else None)
+        return output
+
+class SMPLXLayer(SMPLX):
+    def __init__(
+        self,
+        *args,
+        **kwargs
+    ) -> None:
+        # Just create a SMPLX module without any member variables
+        super(SMPLXLayer, self).__init__(
+            create_global_orient=False,
+            create_body_pose=False,
+            create_left_hand_pose=False,
+            create_right_hand_pose=False,
+            create_jaw_pose=False,
+            create_leye_pose=False,
+            create_reye_pose=False,
+            create_betas=False,
+            create_expression=False,
+            create_transl=False,
+            *args, **kwargs,
+        )
+
+    def forward(
+        self,
+        betas: Optional[Tensor] = None,
+        global_orient: Optional[Tensor] = None,
+        body_pose: Optional[Tensor] = None,
+        left_hand_pose: Optional[Tensor] = None,
+        right_hand_pose: Optional[Tensor] = None,
+        transl: Optional[Tensor] = None,
+        expression: Optional[Tensor] = None,
+        jaw_pose: Optional[Tensor] = None,
+        leye_pose: Optional[Tensor] = None,
+        reye_pose: Optional[Tensor] = None,
+        return_verts: bool = True,
+        return_full_pose: bool = False,
+        **kwargs
+    ) -> SMPLXOutput:
+        '''
+        Forward pass for the SMPLX model
+
+        Parameters
+        ----------
+        global_orient: torch.tensor, optional, shape Bx3
+            If given, ignore the member variable and use it as the global
+            rotation of the body. Useful if someone wishes to predict this
+            with an external model. (default=None)
+        betas: torch.tensor, optional, shape Bx10
+            If given, ignore the member variable `betas` and use it
+            instead. For example, it can be used if shape parameters
+            `betas` are predicted from some external model.
+            (default=None)
+        expression: torch.tensor, optional, shape Bx10
+            If given, ignore the member variable `expression` and use it
+            instead. For example, it can be used if expression parameters
+            `expression` are predicted from some external model.
+        body_pose: torch.tensor, optional, shape Bx(J*3)
+            If given, ignore the member variable `body_pose` and use it
+            instead. For example, it can be used if the pose of the body
+            joints is predicted from some external model.
+            It should be a tensor that contains joint rotations in
+            axis-angle format. (default=None)
+        left_hand_pose: torch.tensor, optional, shape BxP
+            If given, ignore the member variable `left_hand_pose` and
+            use this instead. It should either contain PCA coefficients or
+            joint rotations in axis-angle format.
+        right_hand_pose: torch.tensor, optional, shape BxP
+            If given, ignore the member variable `right_hand_pose` and
+            use this instead. It should either contain PCA coefficients or
+            joint rotations in axis-angle format.
+        jaw_pose: torch.tensor, optional, shape Bx3
+            If given, ignore the member variable `jaw_pose` and
+            use this instead. It should contain joint rotations in
+            axis-angle format.
+        transl: torch.tensor, optional, shape Bx3
+            If given, ignore the member variable `transl` and use it
+            instead.
+            For example, it can be used if the translation
+            `transl` is predicted from some external model.
+            (default=None)
+        return_verts: bool, optional
+            Return the vertices. (default=True)
+        return_full_pose: bool, optional
+            Returns the full pose vector (default=False)
+        Returns
+        -------
+        output: ModelOutput
+            A data class that contains the posed vertices and joints
+        '''
+        device, dtype = self.shapedirs.device, self.shapedirs.dtype
+
+        if global_orient is None:
+            batch_size = 1
+            global_orient = torch.zeros(3, device=device, dtype=dtype).view(
+                1, 1, 3).expand(batch_size, -1, -1).contiguous()
+        else:
+            batch_size = global_orient.shape[0]
+        if body_pose is None:
+            body_pose = torch.zeros(3, device=device, dtype=dtype).view(
+                1, 1, 3).expand(
+                    batch_size, self.NUM_BODY_JOINTS, -1).contiguous()
+        if left_hand_pose is None:
+            left_hand_pose = torch.zeros(3, device=device, dtype=dtype).view(
+                1, 1, 3).expand(batch_size, 15, -1).contiguous()
+        if right_hand_pose is None:
+            right_hand_pose = torch.zeros(3, device=device, dtype=dtype).view(
+                1, 1, 3).expand(batch_size, 15, -1).contiguous()
+        if jaw_pose is None:
+            jaw_pose = torch.zeros(3, device=device, dtype=dtype).view(
+                1, 1, 3).expand(batch_size, -1, -1).contiguous()
+        if leye_pose is None:
+            leye_pose = torch.zeros(3, device=device, dtype=dtype).view(
+                1, 1, 3).expand(batch_size, -1, -1).contiguous()
+        if reye_pose is None:
+            reye_pose = torch.zeros(3, device=device, dtype=dtype).view(
+                1, 1, 3).expand(batch_size, -1, -1).contiguous()
+        if expression is None:
+            expression = torch.zeros([batch_size, self.num_expression_coeffs],
+                                     dtype=dtype, device=device)
+        if betas is None:
+            betas = torch.zeros([batch_size, self.num_betas],
+                                dtype=dtype, device=device)
+        if transl is None:
+            transl = torch.zeros([batch_size, 3], dtype=dtype, device=device)
+
+        # Concatenate all pose vectors
+        full_pose = torch.cat(
+            [global_orient.reshape(-1, 1, 3),
+             body_pose.reshape(-1, self.NUM_BODY_JOINTS, 3),
+             jaw_pose.reshape(-1, 1, 3),
+             leye_pose.reshape(-1, 1, 3),
+             reye_pose.reshape(-1, 1, 3),
+             left_hand_pose.reshape(-1, self.NUM_HAND_JOINTS, 3),
+             right_hand_pose.reshape(-1, self.NUM_HAND_JOINTS, 3)],
+            dim=1)
+        shape_components = torch.cat([betas, expression], dim=-1)
+
+        shapedirs = torch.cat([self.shapedirs, self.expr_dirs], dim=-1)
+
+        vertices, joints = lbs(shape_components, full_pose, self.v_template,
+                               shapedirs, self.posedirs,
+                               self.J_regressor, self.parents,
+                               self.lbs_weights, pose2rot=True)
+
+        lmk_faces_idx = self.lmk_faces_idx.unsqueeze(
+            dim=0).expand(batch_size, -1).contiguous()
+        lmk_bary_coords = self.lmk_bary_coords.unsqueeze(dim=0).repeat(
+            self.batch_size, 1, 1)
+        if self.use_face_contour:
+            lmk_idx_and_bcoords = find_dynamic_lmk_idx_and_bcoords(
+                vertices, full_pose,
+                self.dynamic_lmk_faces_idx,
+                self.dynamic_lmk_bary_coords,
+                self.neck_kin_chain,
+                pose2rot=True,
+            )
+            dyn_lmk_faces_idx, dyn_lmk_bary_coords = lmk_idx_and_bcoords
+
+            lmk_faces_idx = torch.cat([lmk_faces_idx, dyn_lmk_faces_idx], 1)
+            lmk_bary_coords = torch.cat(
+                [lmk_bary_coords.expand(batch_size, -1, -1),
+                 dyn_lmk_bary_coords], 1)
+
+        landmarks = vertices2landmarks(vertices, self.faces_tensor,
+                                       lmk_faces_idx,
+                                       lmk_bary_coords)
+
+        # Add any extra joints that might be needed
+        joints = self.vertex_joint_selector(vertices, joints)
+        # Add the landmarks to the joints
+        joints = torch.cat([joints, landmarks], dim=1)
+        # Map the joints to the current dataset
+
+        if self.joint_mapper is not None:
+            joints = self.joint_mapper(joints=joints, vertices=vertices)
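+        # Apply the global translation last, to both joints and vertices,
+        # after all shape- and pose-dependent deformations.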
+        if transl is not None:
+            joints += transl.unsqueeze(dim=1)
+            vertices += transl.unsqueeze(dim=1)
+
+        output = SMPLXOutput(vertices=vertices if return_verts else None,
+                             joints=joints,
+                             betas=betas,
+                             expression=expression,
+                             global_orient=global_orient,
+                             body_pose=body_pose,
+                             left_hand_pose=left_hand_pose,
+                             right_hand_pose=right_hand_pose,
+                             jaw_pose=jaw_pose,
+                             transl=transl,
+                             full_pose=full_pose if return_full_pose else None)
+        return output
+
+
+class MANO(SMPL):
+    # The hand joints are replaced by MANO
+    NUM_BODY_JOINTS = 1
+    NUM_HAND_JOINTS = 15
+    NUM_JOINTS = NUM_BODY_JOINTS + NUM_HAND_JOINTS
+
+    def __init__(
+        self,
+        model_path: str,
+        is_rhand: bool = True,
+        data_struct: Optional[Struct] = None,
+        create_hand_pose: bool = True,
+        hand_pose: Optional[Tensor] = None,
+        use_pca: bool = True,
+        num_pca_comps: int = 6,
+        flat_hand_mean: bool = False,
+        batch_size: int = 1,
+        dtype=torch.float32,
+        vertex_ids=None,
+        use_compressed: bool = True,
+        ext: str = 'pkl',
+        **kwargs
+    ) -> None:
+        ''' MANO model constructor
+
+            Parameters
+            ----------
+            model_path: str
+                The path to the folder or to the file where the model
+                parameters are stored
+            data_struct: Struct
+                A struct object. If given, then the parameters of the model are
+                read from the object. Otherwise, the model tries to read the
+                parameters from the given `model_path`. (default = None)
+            create_hand_pose: bool, optional
+                Flag for creating a member variable for the pose of the right
+                hand. (default = True)
+            hand_pose: torch.tensor, optional, BxP
+                The default value for the right hand pose member variable.
+                (default = None)
+            num_pca_comps: int, optional
+                The number of PCA components to use for each hand.
+                (default = 6)
+            flat_hand_mean: bool, optional
+                If False, then the pose of the hand is initialized to the mean
+                hand pose; if True, a flat (open) hand is used.
+ batch_size: int, optional + The batch size used for creating the member variables + dtype: torch.dtype, optional + The data type for the created variables + vertex_ids: dict, optional + A dictionary containing the indices of the extra vertices that + will be selected + ''' + + self.num_pca_comps = num_pca_comps + self.is_rhand = is_rhand + # If no data structure is passed, then load the data from the given + # model folder + if data_struct is None: + # Load the model + if osp.isdir(model_path): + model_fn = 'MANO_{}.{ext}'.format( + 'RIGHT' if is_rhand else 'LEFT', ext=ext) + mano_path = os.path.join(model_path, model_fn) + else: + mano_path = model_path + self.is_rhand = True if 'RIGHT' in os.path.basename( + model_path) else False + assert osp.exists(mano_path), 'Path {} does not exist!'.format( + mano_path) + + if ext == 'pkl': + with open(mano_path, 'rb') as mano_file: + model_data = pickle.load(mano_file, encoding='latin1') + elif ext == 'npz': + model_data = np.load(mano_path, allow_pickle=True) + else: + raise ValueError('Unknown extension: {}'.format(ext)) + data_struct = Struct(**model_data) + + if vertex_ids is None: + vertex_ids = VERTEX_IDS['smplh'] + + super(MANO, self).__init__( + model_path=model_path, data_struct=data_struct, + batch_size=batch_size, vertex_ids=vertex_ids, + use_compressed=use_compressed, dtype=dtype, ext=ext, **kwargs) + + # add only MANO tips to the extra joints + self.vertex_joint_selector.extra_joints_idxs = to_tensor( + list(VERTEX_IDS['mano'].values()), dtype=torch.long) + + self.use_pca = use_pca + self.num_pca_comps = num_pca_comps + if self.num_pca_comps == 45: + self.use_pca = False + self.flat_hand_mean = flat_hand_mean + + hand_components = data_struct.hands_components[:num_pca_comps] + + self.np_hand_components = hand_components + + if self.use_pca: + self.register_buffer( + 'hand_components', + torch.tensor(hand_components, dtype=dtype)) + + if self.flat_hand_mean: + hand_mean = np.zeros_like(data_struct.hands_mean) + else: + hand_mean = data_struct.hands_mean + + self.register_buffer('hand_mean', + to_tensor(hand_mean, dtype=self.dtype)) + + # Create the buffers for the pose of the left hand + hand_pose_dim = num_pca_comps if use_pca else 3 * self.NUM_HAND_JOINTS + if create_hand_pose: + if hand_pose is None: + default_hand_pose = torch.zeros([batch_size, hand_pose_dim], + dtype=dtype) + else: + default_hand_pose = torch.tensor(hand_pose, dtype=dtype) + + hand_pose_param = nn.Parameter(default_hand_pose, + requires_grad=True) + self.register_parameter('hand_pose', + hand_pose_param) + + # Create the buffer for the mean pose. + pose_mean = self.create_mean_pose( + data_struct, flat_hand_mean=flat_hand_mean) + pose_mean_tensor = pose_mean.clone().to(dtype) + # pose_mean_tensor = torch.tensor(pose_mean, dtype=dtype) + self.register_buffer('pose_mean', pose_mean_tensor) + + def name(self) -> str: + return 'MANO' + + def create_mean_pose(self, data_struct, flat_hand_mean=False): + # Create the array for the mean pose. 
If flat_hand is false, then use + # the mean that is given by the data, rather than the flat open hand + global_orient_mean = torch.zeros([3], dtype=self.dtype) + pose_mean = torch.cat([global_orient_mean, self.hand_mean], dim=0) + return pose_mean + + def extra_repr(self): + msg = [super(MANO, self).extra_repr()] + if self.use_pca: + msg.append(f'Number of PCA components: {self.num_pca_comps}') + msg.append(f'Flat hand mean: {self.flat_hand_mean}') + return '\n'.join(msg) + + def forward( + self, + betas: Optional[Tensor] = None, + global_orient: Optional[Tensor] = None, + hand_pose: Optional[Tensor] = None, + transl: Optional[Tensor] = None, + return_verts: bool = True, + return_full_pose: bool = False, + **kwargs + ) -> MANOOutput: + ''' Forward pass for the MANO model + ''' + # If no shape and pose parameters are passed along, then use the + # ones from the module + global_orient = (global_orient if global_orient is not None else + self.global_orient) + betas = betas if betas is not None else self.betas + hand_pose = (hand_pose if hand_pose is not None else + self.hand_pose) + + apply_trans = transl is not None or hasattr(self, 'transl') + if transl is None: + if hasattr(self, 'transl'): + transl = self.transl + + if self.use_pca: + hand_pose = torch.einsum( + 'bi,ij->bj', [hand_pose, self.hand_components]) + + full_pose = torch.cat([global_orient, hand_pose], dim=1) + full_pose += self.pose_mean + + vertices, joints = lbs(betas, full_pose, self.v_template, + self.shapedirs, self.posedirs, + self.J_regressor, self.parents, + self.lbs_weights, pose2rot=True, + ) + + # # Add pre-selected extra joints that might be needed + # joints = self.vertex_joint_selector(vertices, joints) + + if self.joint_mapper is not None: + joints = self.joint_mapper(joints) + + if apply_trans: + joints = joints + transl.unsqueeze(dim=1) + vertices = vertices + transl.unsqueeze(dim=1) + + output = MANOOutput(vertices=vertices if return_verts else None, + joints=joints if return_verts else None, + betas=betas, + global_orient=global_orient, + hand_pose=hand_pose, + full_pose=full_pose if return_full_pose else None) + + return output + +class MANOLayer(MANO): + def __init__(self, *args, **kwargs) -> None: + ''' MANO as a layer model constructor + ''' + super(MANOLayer, self).__init__( + create_global_orient=False, + create_hand_pose=False, + create_betas=False, + create_transl=False, + *args, **kwargs) + + def name(self) -> str: + return 'MANO' + + def forward( + self, + betas: Optional[Tensor] = None, + global_orient: Optional[Tensor] = None, + hand_pose: Optional[Tensor] = None, + transl: Optional[Tensor] = None, + return_verts: bool = True, + return_full_pose: bool = False, + **kwargs + ) -> MANOOutput: + ''' Forward pass for the MANO model + ''' + device, dtype = self.shapedirs.device, self.shapedirs.dtype + if global_orient is None: + batch_size = 1 + global_orient = torch.zeros(3, device=device, dtype=dtype).view( + 1, 1, 3).expand(batch_size, -1, -1).contiguous() + else: + batch_size = global_orient.shape[0] + if hand_pose is None: + hand_pose = torch.zeros(3, device=device, dtype=dtype).view( + 1, 1, 3).expand(batch_size, 15, -1).contiguous() + if betas is None: + betas = torch.zeros( + [batch_size, self.num_betas], dtype=dtype, device=device) + if transl is None: + transl = torch.zeros([batch_size, 3], dtype=dtype, device=device) + + full_pose = torch.cat([global_orient, hand_pose], dim=1) + vertices, joints = lbs(betas, full_pose, self.v_template, + self.shapedirs, self.posedirs, + self.J_regressor, 
self.parents,
+                               self.lbs_weights, pose2rot=True)
+
+        if self.joint_mapper is not None:
+            joints = self.joint_mapper(joints)
+
+        if transl is not None:
+            joints = joints + transl.unsqueeze(dim=1)
+            vertices = vertices + transl.unsqueeze(dim=1)
+
+        output = MANOOutput(
+            vertices=vertices if return_verts else None,
+            joints=joints if return_verts else None,
+            betas=betas,
+            global_orient=global_orient,
+            hand_pose=hand_pose,
+            full_pose=full_pose if return_full_pose else None)
+
+        return output
+
+
+class FLAME(SMPL):
+    NUM_JOINTS = 5
+    SHAPE_SPACE_DIM = 300
+    EXPRESSION_SPACE_DIM = 100
+    NECK_IDX = 0
+
+    def __init__(
+        self,
+        model_path: str,
+        data_struct=None,
+        num_expression_coeffs=10,
+        create_expression: bool = True,
+        expression: Optional[Tensor] = None,
+        create_neck_pose: bool = True,
+        neck_pose: Optional[Tensor] = None,
+        create_jaw_pose: bool = True,
+        jaw_pose: Optional[Tensor] = None,
+        create_leye_pose: bool = True,
+        leye_pose: Optional[Tensor] = None,
+        create_reye_pose=True,
+        reye_pose: Optional[Tensor] = None,
+        use_face_contour=False,
+        batch_size: int = 1,
+        gender: str = 'neutral',
+        dtype: torch.dtype = torch.float32,
+        ext='pkl',
+        **kwargs
+    ) -> None:
+        ''' FLAME model constructor
+
+            Parameters
+            ----------
+            model_path: str
+                The path to the folder or to the file where the model
+                parameters are stored
+            num_expression_coeffs: int, optional
+                Number of expression components to use
+                (default = 10).
+            create_expression: bool, optional
+                Flag for creating a member variable for the expression space
+                (default = True).
+            expression: torch.tensor, optional, Bx10
+                The default value for the expression member variable.
+                (default = None)
+            create_neck_pose: bool, optional
+                Flag for creating a member variable for the neck pose.
+                (default = True)
+            neck_pose: torch.tensor, optional, Bx3
+                The default value for the neck pose variable.
+                (default = None)
+            create_jaw_pose: bool, optional
+                Flag for creating a member variable for the jaw pose.
+                (default = True)
+            jaw_pose: torch.tensor, optional, Bx3
+                The default value for the jaw pose variable.
+                (default = None)
+            create_leye_pose: bool, optional
+                Flag for creating a member variable for the left eye pose.
+                (default = True)
+            leye_pose: torch.tensor, optional, Bx3
+                The default value for the left eye pose variable.
+                (default = None)
+            create_reye_pose: bool, optional
+                Flag for creating a member variable for the right eye pose.
+                (default = True)
+            reye_pose: torch.tensor, optional, Bx3
+                The default value for the right eye pose variable.
+ (default = None) + use_face_contour: bool, optional + Whether to compute the keypoints that form the facial contour + batch_size: int, optional + The batch size used for creating the member variables + gender: str, optional + Which gender to load + dtype: torch.dtype + The data type for the created variables + ''' + model_fn = f'FLAME_{gender.upper()}.{ext}' + flame_path = os.path.join(model_path, model_fn) + assert osp.exists(flame_path), 'Path {} does not exist!'.format( + flame_path) + if ext == 'npz': + file_data = np.load(flame_path, allow_pickle=True) + elif ext == 'pkl': + with open(flame_path, 'rb') as smpl_file: + file_data = pickle.load(smpl_file, encoding='latin1') + else: + raise ValueError('Unknown extension: {}'.format(ext)) + data_struct = Struct(**file_data) + + super(FLAME, self).__init__( + model_path=model_path, + data_struct=data_struct, + dtype=dtype, + batch_size=batch_size, + gender=gender, + ext=ext, + **kwargs) + + self.use_face_contour = use_face_contour + + self.vertex_joint_selector.extra_joints_idxs = to_tensor( + [], dtype=torch.long) + + if create_neck_pose: + if neck_pose is None: + default_neck_pose = torch.zeros([batch_size, 3], dtype=dtype) + else: + default_neck_pose = torch.tensor(neck_pose, dtype=dtype) + neck_pose_param = nn.Parameter( + default_neck_pose, requires_grad=True) + self.register_parameter('neck_pose', neck_pose_param) + + if create_jaw_pose: + if jaw_pose is None: + default_jaw_pose = torch.zeros([batch_size, 3], dtype=dtype) + else: + default_jaw_pose = torch.tensor(jaw_pose, dtype=dtype) + jaw_pose_param = nn.Parameter(default_jaw_pose, + requires_grad=True) + self.register_parameter('jaw_pose', jaw_pose_param) + + if create_leye_pose: + if leye_pose is None: + default_leye_pose = torch.zeros([batch_size, 3], dtype=dtype) + else: + default_leye_pose = torch.tensor(leye_pose, dtype=dtype) + leye_pose_param = nn.Parameter(default_leye_pose, + requires_grad=True) + self.register_parameter('leye_pose', leye_pose_param) + + if create_reye_pose: + if reye_pose is None: + default_reye_pose = torch.zeros([batch_size, 3], dtype=dtype) + else: + default_reye_pose = torch.tensor(reye_pose, dtype=dtype) + reye_pose_param = nn.Parameter(default_reye_pose, + requires_grad=True) + self.register_parameter('reye_pose', reye_pose_param) + + shapedirs = data_struct.shapedirs + if len(shapedirs.shape) < 3: + shapedirs = shapedirs[:, :, None] + if (shapedirs.shape[-1] < self.SHAPE_SPACE_DIM + + self.EXPRESSION_SPACE_DIM): + print(f'WARNING: You are using a {self.name()} model, with only' + ' 10 shape and 10 expression coefficients.') + expr_start_idx = 10 + expr_end_idx = 20 + num_expression_coeffs = min(num_expression_coeffs, 10) + else: + expr_start_idx = self.SHAPE_SPACE_DIM + expr_end_idx = self.SHAPE_SPACE_DIM + num_expression_coeffs + num_expression_coeffs = min( + num_expression_coeffs, self.EXPRESSION_SPACE_DIM) + + self._num_expression_coeffs = num_expression_coeffs + + expr_dirs = shapedirs[:, :, expr_start_idx:expr_end_idx] + self.register_buffer( + 'expr_dirs', to_tensor(to_np(expr_dirs), dtype=dtype)) + + if create_expression: + if expression is None: + default_expression = torch.zeros( + [batch_size, self.num_expression_coeffs], dtype=dtype) + else: + default_expression = torch.tensor(expression, dtype=dtype) + expression_param = nn.Parameter(default_expression, + requires_grad=True) + self.register_parameter('expression', expression_param) + + # The pickle file that contains the barycentric coordinates for + # regressing the landmarks + 
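+        # Added clarifying note (not part of the upstream SMPL-X code): the
+        # static embedding stores, for every static face landmark, the index
+        # of a mesh triangle ('lmk_face_idx') and the barycentric weights
+        # inside that triangle ('lmk_b_coords'); vertices2landmarks() later
+        # interpolates the posed vertices with these weights at each forward
+        # pass.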
+        landmark_bcoord_filename = osp.join(
+            model_path, 'flame_static_embedding.pkl')
+
+        with open(landmark_bcoord_filename, 'rb') as fp:
+            landmarks_data = pickle.load(fp, encoding='latin1')
+
+        lmk_faces_idx = landmarks_data['lmk_face_idx'].astype(np.int64)
+        self.register_buffer('lmk_faces_idx',
+                             torch.tensor(lmk_faces_idx, dtype=torch.long))
+        lmk_bary_coords = landmarks_data['lmk_b_coords']
+        self.register_buffer('lmk_bary_coords',
+                             torch.tensor(lmk_bary_coords, dtype=dtype))
+        if self.use_face_contour:
+            face_contour_path = os.path.join(
+                model_path, 'flame_dynamic_embedding.npy')
+            contour_embeddings = np.load(face_contour_path,
+                                         allow_pickle=True,
+                                         encoding='latin1')[()]
+
+            dynamic_lmk_faces_idx = np.array(
+                contour_embeddings['lmk_face_idx'], dtype=np.int64)
+            dynamic_lmk_faces_idx = torch.tensor(
+                dynamic_lmk_faces_idx,
+                dtype=torch.long)
+            self.register_buffer('dynamic_lmk_faces_idx',
+                                 dynamic_lmk_faces_idx)
+
+            dynamic_lmk_b_coords = torch.tensor(
+                contour_embeddings['lmk_b_coords'], dtype=dtype)
+            self.register_buffer(
+                'dynamic_lmk_bary_coords', dynamic_lmk_b_coords)
+
+            neck_kin_chain = find_joint_kin_chain(self.NECK_IDX, self.parents)
+            self.register_buffer(
+                'neck_kin_chain',
+                torch.tensor(neck_kin_chain, dtype=torch.long))
+
+    @property
+    def num_expression_coeffs(self):
+        return self._num_expression_coeffs
+
+    def name(self) -> str:
+        return 'FLAME'
+
+    def extra_repr(self):
+        msg = [
+            super(FLAME, self).extra_repr(),
+            f'Number of Expression Coefficients: {self.num_expression_coeffs}',
+            f'Use face contour: {self.use_face_contour}',
+        ]
+        return '\n'.join(msg)
+
+    def forward(
+        self,
+        betas: Optional[Tensor] = None,
+        global_orient: Optional[Tensor] = None,
+        neck_pose: Optional[Tensor] = None,
+        transl: Optional[Tensor] = None,
+        expression: Optional[Tensor] = None,
+        jaw_pose: Optional[Tensor] = None,
+        leye_pose: Optional[Tensor] = None,
+        reye_pose: Optional[Tensor] = None,
+        return_verts: bool = True,
+        return_full_pose: bool = False,
+        pose2rot: bool = True,
+        **kwargs
+    ) -> FLAMEOutput:
+        '''
+        Forward pass for the FLAME model
+
+        Parameters
+        ----------
+        global_orient: torch.tensor, optional, shape Bx3
+            If given, ignore the member variable and use it as the global
+            rotation of the body. Useful if someone wishes to predict this
+            with an external model. (default=None)
+        betas: torch.tensor, optional, shape Bx10
+            If given, ignore the member variable `betas` and use it
+            instead. For example, it can be used if shape parameters
+            `betas` are predicted from some external model.
+            (default=None)
+        expression: torch.tensor, optional, shape Bx10
+            If given, ignore the member variable `expression` and use it
+            instead. For example, it can be used if expression parameters
+            `expression` are predicted from some external model.
+        jaw_pose: torch.tensor, optional, shape Bx3
+            If given, ignore the member variable `jaw_pose` and
+            use this instead. It should contain joint rotations in
+            axis-angle format.
+        transl: torch.tensor, optional, shape Bx3
+            If given, ignore the member variable `transl` and use it
+            instead. For example, it can be used if the translation
+            `transl` is predicted from some external model.
+            (default=None)
+        return_verts: bool, optional
+            Return the vertices.
(default=True) + return_full_pose: bool, optional + Returns the full axis-angle pose vector (default=False) + + Returns + ------- + output: ModelOutput + A named tuple of type `ModelOutput` + ''' + + # If no shape and pose parameters are passed along, then use the + # ones from the module + global_orient = (global_orient if global_orient is not None else + self.global_orient) + jaw_pose = jaw_pose if jaw_pose is not None else self.jaw_pose + neck_pose = neck_pose if neck_pose is not None else self.neck_pose + + leye_pose = leye_pose if leye_pose is not None else self.leye_pose + reye_pose = reye_pose if reye_pose is not None else self.reye_pose + + betas = betas if betas is not None else self.betas + expression = expression if expression is not None else self.expression + + apply_trans = transl is not None or hasattr(self, 'transl') + if transl is None: + if hasattr(self, 'transl'): + transl = self.transl + + full_pose = torch.cat( + [global_orient, neck_pose, jaw_pose, leye_pose, reye_pose], dim=1) + + batch_size = max(betas.shape[0], global_orient.shape[0], + jaw_pose.shape[0]) + # Concatenate the shape and expression coefficients + scale = int(batch_size / betas.shape[0]) + if scale > 1: + betas = betas.expand(scale, -1) + shape_components = torch.cat([betas, expression], dim=-1) + shapedirs = torch.cat([self.shapedirs, self.expr_dirs], dim=-1) + + vertices, joints = lbs(shape_components, full_pose, self.v_template, + shapedirs, self.posedirs, + self.J_regressor, self.parents, + self.lbs_weights, pose2rot=pose2rot, + ) + + lmk_faces_idx = self.lmk_faces_idx.unsqueeze( + dim=0).expand(batch_size, -1).contiguous() + lmk_bary_coords = self.lmk_bary_coords.unsqueeze(dim=0).repeat( + self.batch_size, 1, 1) + if self.use_face_contour: + lmk_idx_and_bcoords = find_dynamic_lmk_idx_and_bcoords( + vertices, full_pose, self.dynamic_lmk_faces_idx, + self.dynamic_lmk_bary_coords, + self.neck_kin_chain, + pose2rot=True, + ) + dyn_lmk_faces_idx, dyn_lmk_bary_coords = lmk_idx_and_bcoords + lmk_faces_idx = torch.cat([lmk_faces_idx, + dyn_lmk_faces_idx], 1) + lmk_bary_coords = torch.cat( + [lmk_bary_coords.expand(batch_size, -1, -1), + dyn_lmk_bary_coords], 1) + + landmarks = vertices2landmarks(vertices, self.faces_tensor, + lmk_faces_idx, + lmk_bary_coords) + + # Add any extra joints that might be needed + joints = self.vertex_joint_selector(vertices, joints) + # Add the landmarks to the joints + joints = torch.cat([joints, landmarks], dim=1) + + # Map the joints to the current dataset + if self.joint_mapper is not None: + joints = self.joint_mapper(joints=joints, vertices=vertices) + + if apply_trans: + joints += transl.unsqueeze(dim=1) + vertices += transl.unsqueeze(dim=1) + + output = FLAMEOutput(vertices=vertices if return_verts else None, + joints=joints, + betas=betas, + expression=expression, + global_orient=global_orient, + neck_pose=neck_pose, + jaw_pose=jaw_pose, + full_pose=full_pose if return_full_pose else None) + return output + +class FLAMELayer(FLAME): + def __init__(self, *args, **kwargs) -> None: + ''' FLAME as a layer model constructor ''' + super(FLAMELayer, self).__init__( + create_betas=False, + create_expression=False, + create_global_orient=False, + create_neck_pose=False, + create_jaw_pose=False, + create_leye_pose=False, + create_reye_pose=False, + *args, + **kwargs) + + def forward( + self, + betas: Optional[Tensor] = None, + global_orient: Optional[Tensor] = None, + neck_pose: Optional[Tensor] = None, + transl: Optional[Tensor] = None, + expression: Optional[Tensor] = None, + 
+        jaw_pose: Optional[Tensor] = None,
+        leye_pose: Optional[Tensor] = None,
+        reye_pose: Optional[Tensor] = None,
+        return_verts: bool = True,
+        return_full_pose: bool = False,
+        pose2rot: bool = True,
+        **kwargs
+    ) -> FLAMEOutput:
+        '''
+        Forward pass for the FLAME model
+
+        Parameters
+        ----------
+        global_orient: torch.tensor, optional, shape Bx3
+            If given, ignore the member variable and use it as the global
+            rotation of the body. Useful if someone wishes to predict this
+            with an external model. (default=None)
+        betas: torch.tensor, optional, shape Bx10
+            If given, ignore the member variable `betas` and use it
+            instead. For example, it can be used if shape parameters
+            `betas` are predicted from some external model.
+            (default=None)
+        expression: torch.tensor, optional, shape Bx10
+            If given, ignore the member variable `expression` and use it
+            instead. For example, it can be used if expression parameters
+            `expression` are predicted from some external model.
+        jaw_pose: torch.tensor, optional, shape Bx3
+            If given, ignore the member variable `jaw_pose` and
+            use this instead. It should contain joint rotations in
+            axis-angle format.
+        transl: torch.tensor, optional, shape Bx3
+            If given, ignore the member variable `transl` and use it
+            instead. For example, it can be used if the translation
+            `transl` is predicted from some external model.
+            (default=None)
+        return_verts: bool, optional
+            Return the vertices. (default=True)
+        return_full_pose: bool, optional
+            Returns the full axis-angle pose vector (default=False)
+
+        Returns
+        -------
+        output: ModelOutput
+            A named tuple of type `ModelOutput`
+        '''
+        device, dtype = self.shapedirs.device, self.shapedirs.dtype
+        if global_orient is None:
+            batch_size = 1
+            global_orient = torch.zeros(3, device=device, dtype=dtype).view(
+                1, 1, 3).expand(batch_size, -1, -1).contiguous()
+        else:
+            batch_size = global_orient.shape[0]
+        if neck_pose is None:
+            neck_pose = torch.zeros(3, device=device, dtype=dtype).view(
+                1, 1, 3).expand(batch_size, 1, -1).contiguous()
+        if jaw_pose is None:
+            jaw_pose = torch.zeros(3, device=device, dtype=dtype).view(
+                1, 1, 3).expand(batch_size, -1, -1).contiguous()
+        if leye_pose is None:
+            leye_pose = torch.zeros(3, device=device, dtype=dtype).view(
+                1, 1, 3).expand(batch_size, -1, -1).contiguous()
+        if reye_pose is None:
+            reye_pose = torch.zeros(3, device=device, dtype=dtype).view(
+                1, 1, 3).expand(batch_size, -1, -1).contiguous()
+        if betas is None:
+            betas = torch.zeros([batch_size, self.num_betas],
+                                dtype=dtype, device=device)
+        if expression is None:
+            expression = torch.zeros([batch_size, self.num_expression_coeffs],
+                                     dtype=dtype, device=device)
+        if transl is None:
+            transl = torch.zeros([batch_size, 3], dtype=dtype, device=device)
+
+        full_pose = torch.cat(
+            [global_orient, neck_pose, jaw_pose, leye_pose, reye_pose], dim=1)
+
+        shape_components = torch.cat([betas, expression], dim=-1)
+        shapedirs = torch.cat([self.shapedirs, self.expr_dirs], dim=-1)
+
+        vertices, joints = lbs(shape_components, full_pose, self.v_template,
+                               shapedirs, self.posedirs,
+                               self.J_regressor, self.parents,
+                               self.lbs_weights, pose2rot=True,
+                               )
+
+        lmk_faces_idx = self.lmk_faces_idx.unsqueeze(
+            dim=0).expand(batch_size, -1).contiguous()
+        lmk_bary_coords = self.lmk_bary_coords.unsqueeze(dim=0).repeat(
+            self.batch_size, 1, 1)
+        if self.use_face_contour:
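+            # Added clarifying note (not part of the upstream SMPL-X code):
+            # the facial-contour landmarks are not attached to fixed mesh
+            # faces. find_dynamic_lmk_idx_and_bcoords() below looks up their
+            # face indices and barycentric weights from the current neck
+            # rotation through a precomputed table, and the result is appended
+            # to the static landmarks before barycentric interpolation.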
+            lmk_idx_and_bcoords = find_dynamic_lmk_idx_and_bcoords(
+                vertices, full_pose, self.dynamic_lmk_faces_idx,
+                self.dynamic_lmk_bary_coords,
+                self.neck_kin_chain,
+                pose2rot=False,
+            )
+            dyn_lmk_faces_idx, dyn_lmk_bary_coords = lmk_idx_and_bcoords
+            lmk_faces_idx = torch.cat([lmk_faces_idx,
+                                       dyn_lmk_faces_idx], 1)
+            lmk_bary_coords = torch.cat(
+                [lmk_bary_coords.expand(batch_size, -1, -1),
+                 dyn_lmk_bary_coords], 1)
+
+        landmarks = vertices2landmarks(vertices, self.faces_tensor,
+                                       lmk_faces_idx,
+                                       lmk_bary_coords)
+
+        # Add any extra joints that might be needed
+        joints = self.vertex_joint_selector(vertices, joints)
+        # Add the landmarks to the joints
+        joints = torch.cat([joints, landmarks], dim=1)
+
+        # Map the joints to the current dataset
+        if self.joint_mapper is not None:
+            joints = self.joint_mapper(joints=joints, vertices=vertices)
+
+        joints += transl.unsqueeze(dim=1)
+        vertices += transl.unsqueeze(dim=1)
+
+        output = FLAMEOutput(vertices=vertices if return_verts else None,
+                             joints=joints,
+                             betas=betas,
+                             expression=expression,
+                             global_orient=global_orient,
+                             neck_pose=neck_pose,
+                             jaw_pose=jaw_pose,
+                             full_pose=full_pose if return_full_pose else None)
+        return output
+
+
+def build_layer(
+    model_path: str,
+    model_type: str = 'smpl',
+    **kwargs
+) -> Union[SMPLLayer, SMPLHLayer, SMPLXLayer, MANOLayer, FLAMELayer]:
+    ''' Method for creating a model from a path and a model type
+
+    Parameters
+    ----------
+    model_path: str
+        Either the path to the model you wish to load or a folder,
+        where each subfolder contains the different types, i.e.:
+        model_path:
+        |
+        |-- smpl
+            |-- SMPL_FEMALE
+            |-- SMPL_NEUTRAL
+            |-- SMPL_MALE
+        |-- smplh
+            |-- SMPLH_FEMALE
+            |-- SMPLH_MALE
+        |-- smplx
+            |-- SMPLX_FEMALE
+            |-- SMPLX_NEUTRAL
+            |-- SMPLX_MALE
+        |-- mano
+            |-- MANO RIGHT
+            |-- MANO LEFT
+        |-- flame
+            |-- FLAME_FEMALE
+            |-- FLAME_MALE
+            |-- FLAME_NEUTRAL
+
+    model_type: str, optional
+        When model_path is a folder, then this parameter specifies the
+        type of model to be loaded
+    **kwargs: dict
+        Keyword arguments
+
+    Returns
+    -------
+    body_model: nn.Module
+        The PyTorch module that implements the corresponding body model
+    Raises
+    ------
+    ValueError: In case the model type is not one of SMPL, SMPLH,
+        SMPLX, MANO or FLAME
+    '''
+
+    if osp.isdir(model_path):
+        model_path = os.path.join(model_path, model_type)
+    else:
+        model_type = osp.basename(model_path).split('_')[0].lower()
+
+    if model_type.lower() == 'smpl':
+        return SMPLLayer(model_path, **kwargs)
+    elif model_type.lower() == 'smplh':
+        return SMPLHLayer(model_path, **kwargs)
+    elif model_type.lower() == 'smplx':
+        return SMPLXLayer(model_path, **kwargs)
+    elif 'mano' in model_type.lower():
+        return MANOLayer(model_path, **kwargs)
+    elif 'flame' in model_type.lower():
+        return FLAMELayer(model_path, **kwargs)
+    else:
+        raise ValueError(f'Unknown model type {model_type}, exiting!')
+
+
+def create(
+    model_path: str,
+    model_type: str = 'smpl',
+    **kwargs
+) -> Union[SMPL, SMPLH, SMPLX, MANO, FLAME]:
+    ''' Method for creating a model from a path and a model type
+
+    Parameters
+    ----------
+    model_path: str
+        Either the path to the model you wish to load or a folder,
+        where each subfolder contains the different types, i.e.:
+        model_path:
+        |
+        |-- smpl
+            |-- SMPL_FEMALE
+            |-- SMPL_NEUTRAL
+            |-- SMPL_MALE
+        |-- smplh
+            |-- SMPLH_FEMALE
+            |-- SMPLH_MALE
+        |-- smplx
+            |-- SMPLX_FEMALE
+            |-- SMPLX_NEUTRAL
+            |-- SMPLX_MALE
+        |-- mano
+            |-- MANO RIGHT
+            |-- MANO LEFT
+
+    model_type: str, optional
+        When model_path is a folder, then this parameter specifies the
+        type of model to be loaded
+    **kwargs: dict
+        Keyword arguments
+
+    Returns
+    -------
+    body_model: nn.Module
+        The PyTorch module that implements the corresponding body model
+    Raises
+    ------
+    ValueError: In case the model type is not one of SMPL, SMPLH,
+        SMPLX, MANO or FLAME
+    '''
+
+    # If it's a folder, assume that each subfolder contains one model type
+    if osp.isdir(model_path):
+        model_path = os.path.join(model_path, model_type)
+    else:
+        model_type = osp.basename(model_path).split('_')[0].lower()
+
+    if model_type.lower() == 'smpl':
+        return SMPL(model_path, **kwargs)
+    elif model_type.lower() == 'smplh':
+        return SMPLH(model_path, **kwargs)
+    elif model_type.lower() == 'smplx':
+        return SMPLX(model_path, **kwargs)
+    elif 'mano' in model_type.lower():
+        return MANO(model_path, **kwargs)
+    elif 'flame' in model_type.lower():
+        return FLAME(model_path, **kwargs)
+    else:
+        raise ValueError(f'Unknown model type {model_type}, exiting!')
diff --git a/utils/smplx/smplx/joint_names.py b/utils/smplx/smplx/joint_names.py
new file mode 100644
index 0000000000000000000000000000000000000000..0a3a10f8cef8b50075dc9f680459fc5d596a0013
--- /dev/null
+++ b/utils/smplx/smplx/joint_names.py
@@ -0,0 +1,163 @@
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+# +# Contact: ps-license@tuebingen.mpg.de + +JOINT_NAMES = [ + 'pelvis', + 'left_hip', + 'right_hip', + 'spine1', + 'left_knee', + 'right_knee', + 'spine2', + 'left_ankle', + 'right_ankle', + 'spine3', + 'left_foot', + 'right_foot', + 'neck', + 'left_collar', + 'right_collar', + 'head', + 'left_shoulder', + 'right_shoulder', + 'left_elbow', + 'right_elbow', + 'left_wrist', + 'right_wrist', + 'jaw', + 'left_eye_smplhf', + 'right_eye_smplhf', + 'left_index1', + 'left_index2', + 'left_index3', + 'left_middle1', + 'left_middle2', + 'left_middle3', + 'left_pinky1', + 'left_pinky2', + 'left_pinky3', + 'left_ring1', + 'left_ring2', + 'left_ring3', + 'left_thumb1', + 'left_thumb2', + 'left_thumb3', + 'right_index1', + 'right_index2', + 'right_index3', + 'right_middle1', + 'right_middle2', + 'right_middle3', + 'right_pinky1', + 'right_pinky2', + 'right_pinky3', + 'right_ring1', + 'right_ring2', + 'right_ring3', + 'right_thumb1', + 'right_thumb2', + 'right_thumb3', + 'nose', + 'right_eye', + 'left_eye', + 'right_ear', + 'left_ear', + 'left_big_toe', + 'left_small_toe', + 'left_heel', + 'right_big_toe', + 'right_small_toe', + 'right_heel', + 'left_thumb', + 'left_index', + 'left_middle', + 'left_ring', + 'left_pinky', + 'right_thumb', + 'right_index', + 'right_middle', + 'right_ring', + 'right_pinky', + 'right_eye_brow1', + 'right_eye_brow2', + 'right_eye_brow3', + 'right_eye_brow4', + 'right_eye_brow5', + 'left_eye_brow5', + 'left_eye_brow4', + 'left_eye_brow3', + 'left_eye_brow2', + 'left_eye_brow1', + 'nose1', + 'nose2', + 'nose3', + 'nose4', + 'right_nose_2', + 'right_nose_1', + 'nose_middle', + 'left_nose_1', + 'left_nose_2', + 'right_eye1', + 'right_eye2', + 'right_eye3', + 'right_eye4', + 'right_eye5', + 'right_eye6', + 'left_eye4', + 'left_eye3', + 'left_eye2', + 'left_eye1', + 'left_eye6', + 'left_eye5', + 'right_mouth_1', + 'right_mouth_2', + 'right_mouth_3', + 'mouth_top', + 'left_mouth_3', + 'left_mouth_2', + 'left_mouth_1', + 'left_mouth_5', # 59 in OpenPose output + 'left_mouth_4', # 58 in OpenPose output + 'mouth_bottom', + 'right_mouth_4', + 'right_mouth_5', + 'right_lip_1', + 'right_lip_2', + 'lip_top', + 'left_lip_2', + 'left_lip_1', + 'left_lip_3', + 'lip_bottom', + 'right_lip_3', + # Face contour + 'right_contour_1', + 'right_contour_2', + 'right_contour_3', + 'right_contour_4', + 'right_contour_5', + 'right_contour_6', + 'right_contour_7', + 'right_contour_8', + 'contour_middle', + 'left_contour_8', + 'left_contour_7', + 'left_contour_6', + 'left_contour_5', + 'left_contour_4', + 'left_contour_3', + 'left_contour_2', + 'left_contour_1', +] diff --git a/utils/smplx/smplx/lbs.py b/utils/smplx/smplx/lbs.py new file mode 100644 index 0000000000000000000000000000000000000000..ace0c54defb954c4b2f6a1fc03f675311da7b196 --- /dev/null +++ b/utils/smplx/smplx/lbs.py @@ -0,0 +1,505 @@ +# -*- coding: utf-8 -*- + +# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is +# holder of all proprietary rights on this computer program. +# You can only use this computer program if you have closed +# a license agreement with MPG or you get the right to use the computer +# program from someone who is authorized to grant you that right. +# Any use of the computer program without a valid license is prohibited and +# liable to prosecution. +# +# Copyright©2019 Max-Planck-Gesellschaft zur Förderung +# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute +# for Intelligent Systems. All rights reserved. 
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+from __future__ import absolute_import
+from __future__ import print_function
+from __future__ import division
+
+from typing import Tuple, List
+import numpy as np
+
+import torch
+import torch.nn.functional as F
+
+from .utils import rot_mat_to_euler, Tensor
+
+
+def find_dynamic_lmk_idx_and_bcoords(
+    vertices: Tensor,
+    pose: Tensor,
+    dynamic_lmk_faces_idx: Tensor,
+    dynamic_lmk_b_coords: Tensor,
+    neck_kin_chain: List[int],
+    pose2rot: bool = True,
+) -> Tuple[Tensor, Tensor]:
+    ''' Compute the faces and barycentric coordinates for the dynamic landmarks
+
+
+    To do so, we first compute the rotation of the neck around the y-axis
+    and then use a pre-computed look-up table to find the faces and the
+    barycentric coordinates that will be used.
+
+    Special thanks to Soubhik Sanyal (soubhik.sanyal@tuebingen.mpg.de)
+    for providing the original TensorFlow implementation and for the LUT.
+
+    Parameters
+    ----------
+    vertices: torch.tensor BxVx3, dtype = torch.float32
+        The tensor of input vertices
+    pose: torch.tensor Bx(Jx3), dtype = torch.float32
+        The current pose of the body model
+    dynamic_lmk_faces_idx: torch.tensor L, dtype = torch.long
+        The look-up table from neck rotation to faces
+    dynamic_lmk_b_coords: torch.tensor Lx3, dtype = torch.float32
+        The look-up table from neck rotation to barycentric coordinates
+    neck_kin_chain: list
+        A python list that contains the indices of the joints that form the
+        kinematic chain of the neck.
+    dtype: torch.dtype, optional
+
+    Returns
+    -------
+    dyn_lmk_faces_idx: torch.tensor, dtype = torch.long
+        A tensor of size BxL that contains the indices of the faces that
+        will be used to compute the current dynamic landmarks.
+    dyn_lmk_b_coords: torch.tensor, dtype = torch.float32
+        A tensor of size BxLx3 that contains the barycentric coordinates
+        that will be used to compute the current dynamic landmarks.
+    '''
+
+    dtype = vertices.dtype
+    batch_size = vertices.shape[0]
+
+    if pose2rot:
+        aa_pose = torch.index_select(pose.view(batch_size, -1, 3), 1,
+                                     neck_kin_chain)
+        rot_mats = batch_rodrigues(
+            aa_pose.view(-1, 3)).view(batch_size, -1, 3, 3)
+    else:
+        rot_mats = torch.index_select(
+            pose.view(batch_size, -1, 3, 3), 1, neck_kin_chain)
+
+    rel_rot_mat = torch.eye(
+        3, device=vertices.device, dtype=dtype).unsqueeze_(dim=0).repeat(
+            batch_size, 1, 1)
+    for idx in range(len(neck_kin_chain)):
+        rel_rot_mat = torch.bmm(rot_mats[:, idx], rel_rot_mat)
+
+    y_rot_angle = torch.round(
+        torch.clamp(-rot_mat_to_euler(rel_rot_mat) * 180.0 / np.pi,
+                    max=39)).to(dtype=torch.long)
+    neg_mask = y_rot_angle.lt(0).to(dtype=torch.long)
+    mask = y_rot_angle.lt(-39).to(dtype=torch.long)
+    neg_vals = mask * 78 + (1 - mask) * (39 - y_rot_angle)
+    y_rot_angle = (neg_mask * neg_vals +
+                   (1 - neg_mask) * y_rot_angle)
+
+    dyn_lmk_faces_idx = torch.index_select(dynamic_lmk_faces_idx,
+                                           0, y_rot_angle)
+    dyn_lmk_b_coords = torch.index_select(dynamic_lmk_b_coords,
+                                          0, y_rot_angle)
+
+    return dyn_lmk_faces_idx, dyn_lmk_b_coords
+
+
+def vertices2landmarks(
+    vertices: Tensor,
+    faces: Tensor,
+    lmk_faces_idx: Tensor,
+    lmk_bary_coords: Tensor
+) -> Tensor:
+    ''' Calculates landmarks by barycentric interpolation
+
+    Parameters
+    ----------
+    vertices: torch.tensor BxVx3, dtype = torch.float32
+        The tensor of input vertices
+    faces: torch.tensor Fx3, dtype = torch.long
+        The faces of the mesh
+    lmk_faces_idx: torch.tensor L, dtype = torch.long
+        The tensor with the indices of the faces used to calculate the
+        landmarks.
+ lmk_bary_coords: torch.tensor Lx3, dtype = torch.float32 + The tensor of barycentric coordinates that are used to interpolate + the landmarks + + Returns + ------- + landmarks: torch.tensor BxLx3, dtype = torch.float32 + The coordinates of the landmarks for each mesh in the batch + ''' + # Extract the indices of the vertices for each face + # BxLx3 + batch_size, num_verts = vertices.shape[:2] + device = vertices.device + + lmk_faces = torch.index_select(faces, 0, lmk_faces_idx.view(-1)).view( + batch_size, -1, 3) + + lmk_faces += torch.arange( + batch_size, dtype=torch.long, device=device).view(-1, 1, 1) * num_verts + + lmk_vertices = vertices.view(-1, 3)[lmk_faces].view( + batch_size, -1, 3, 3) + + landmarks = torch.einsum('blfi,blf->bli', [lmk_vertices, lmk_bary_coords]) + return landmarks + + +def lbs( + betas: Tensor, + pose: Tensor, + v_template: Tensor, + shapedirs: Tensor, + posedirs: Tensor, + J_regressor: Tensor, + parents: Tensor, + lbs_weights: Tensor, + pose2rot: bool = True, +) -> Tuple[Tensor, Tensor]: + ''' Performs Linear Blend Skinning with the given shape and pose parameters + + Parameters + ---------- + betas : torch.tensor BxNB + The tensor of shape parameters + pose : torch.tensor Bx(J + 1) * 3 + The pose parameters in axis-angle format + v_template torch.tensor BxVx3 + The template mesh that will be deformed + shapedirs : torch.tensor 1xNB + The tensor of PCA shape displacements + posedirs : torch.tensor Px(V * 3) + The pose PCA coefficients + J_regressor : torch.tensor JxV + The regressor array that is used to calculate the joints from + the position of the vertices + parents: torch.tensor J + The array that describes the kinematic tree for the model + lbs_weights: torch.tensor N x V x (J + 1) + The linear blend skinning weights that represent how much the + rotation matrix of each part affects each vertex + pose2rot: bool, optional + Flag on whether to convert the input pose tensor to rotation + matrices. The default value is True. If False, then the pose tensor + should already contain rotation matrices and have a size of + Bx(J + 1)x9 + dtype: torch.dtype, optional + + Returns + ------- + verts: torch.tensor BxVx3 + The vertices of the mesh after applying the shape and pose + displacements. + joints: torch.tensor BxJx3 + The joints of the model + ''' + + batch_size = max(betas.shape[0], pose.shape[0]) + device, dtype = betas.device, betas.dtype + + # Add shape contribution + v_shaped = v_template + blend_shapes(betas, shapedirs) + + # Get the joints + # NxJx3 array + J = vertices2joints(J_regressor, v_shaped) + + # 3. Add pose blend shapes + # N x J x 3 x 3 + ident = torch.eye(3, dtype=dtype, device=device) + if pose2rot: + rot_mats = batch_rodrigues(pose.view(-1, 3)).view( + [batch_size, -1, 3, 3]) + + pose_feature = (rot_mats[:, 1:, :, :] - ident).view([batch_size, -1]) + # (N x P) x (P, V * 3) -> N x V x 3 + pose_offsets = torch.matmul( + pose_feature, posedirs).view(batch_size, -1, 3) + else: + pose_feature = pose[:, 1:].view(batch_size, -1, 3, 3) - ident + rot_mats = pose.view(batch_size, -1, 3, 3) + + pose_offsets = torch.matmul(pose_feature.view(batch_size, -1), + posedirs).view(batch_size, -1, 3) + + v_posed = pose_offsets + v_shaped + + # 4. Get the global joint location + J_transformed, A = batch_rigid_transform(rot_mats, J, parents, dtype=dtype) + + # 5. 
Do skinning: + # W is N x V x (J + 1) + W = lbs_weights.unsqueeze(dim=0).expand([batch_size, -1, -1]) + # (N x V x (J + 1)) x (N x (J + 1) x 16) + num_joints = J_regressor.shape[0] + T = torch.matmul(W, A.view(batch_size, num_joints, 16)) \ + .view(batch_size, -1, 4, 4) + + homogen_coord = torch.ones([batch_size, v_posed.shape[1], 1], + dtype=dtype, device=device) + v_posed_homo = torch.cat([v_posed, homogen_coord], dim=2) + v_homo = torch.matmul(T, torch.unsqueeze(v_posed_homo, dim=-1)) + verts = v_homo[:, :, :3, 0] + + return verts, J_transformed + + +def lbs_v2( + betas: Tensor, + pose: Tensor, + v_template: Tensor, + shapedirs: Tensor, + posedirs: Tensor, + J_regressor: Tensor, + parents: Tensor, + lbs_weights: Tensor, + flame_betas: Tensor, + flame_shapedirs: Tensor, + head_idxs: np.array, + pose2rot: bool = True, +) -> Tuple[Tensor, Tensor]: + ''' Performs Linear Blend Skinning with the given shape and pose parameters + + Parameters + ---------- + betas : torch.tensor BxNB + The tensor of shape parameters + pose : torch.tensor Bx(J + 1) * 3 + The pose parameters in axis-angle format + v_template torch.tensor BxVx3 + The template mesh that will be deformed + shapedirs : torch.tensor 1xNB + The tensor of PCA shape displacements + posedirs : torch.tensor Px(V * 3) + The pose PCA coefficients + J_regressor : torch.tensor JxV + The regressor array that is used to calculate the joints from + the position of the vertices + parents: torch.tensor J + The array that describes the kinematic tree for the model + lbs_weights: torch.tensor N x V x (J + 1) + The linear blend skinning weights that represent how much the + rotation matrix of each part affects each vertex + pose2rot: bool, optional + Flag on whether to convert the input pose tensor to rotation + matrices. The default value is True. If False, then the pose tensor + should already contain rotation matrices and have a size of + Bx(J + 1)x9 + dtype: torch.dtype, optional + + Returns + ------- + verts: torch.tensor BxVx3 + The vertices of the mesh after applying the shape and pose + displacements. + joints: torch.tensor BxJx3 + The joints of the model + ''' + + batch_size = max(betas.shape[0], pose.shape[0]) + device, dtype = betas.device, betas.dtype + + # Add shape contribution + v_shaped = v_template + blend_shapes(betas, shapedirs) + + # Add shape contribution of FLAME + flame_v_shape = v_template[head_idxs] + blend_shapes(flame_betas, flame_shapedirs) + v_shaped[:, head_idxs] = flame_v_shape + + # Get the joints + # NxJx3 array + J = vertices2joints(J_regressor, v_shaped) + + # 3. Add pose blend shapes + # N x J x 3 x 3 + ident = torch.eye(3, dtype=dtype, device=device) + if pose2rot: + rot_mats = batch_rodrigues(pose.view(-1, 3)).view( + [batch_size, -1, 3, 3]) + + pose_feature = (rot_mats[:, 1:, :, :] - ident).view([batch_size, -1]) + # (N x P) x (P, V * 3) -> N x V x 3 + pose_offsets = torch.matmul( + pose_feature, posedirs).view(batch_size, -1, 3) + else: + pose_feature = pose[:, 1:].view(batch_size, -1, 3, 3) - ident + rot_mats = pose.view(batch_size, -1, 3, 3) + + pose_offsets = torch.matmul(pose_feature.view(batch_size, -1), + posedirs).view(batch_size, -1, 3) + + v_posed = pose_offsets + v_shaped + + # 4. Get the global joint location + J_transformed, A = batch_rigid_transform(rot_mats, J, parents, dtype=dtype) + + # 5. 
Do skinning: + # W is N x V x (J + 1) + W = lbs_weights.unsqueeze(dim=0).expand([batch_size, -1, -1]) + # (N x V x (J + 1)) x (N x (J + 1) x 16) + num_joints = J_regressor.shape[0] + T = torch.matmul(W, A.view(batch_size, num_joints, 16)) \ + .view(batch_size, -1, 4, 4) + + homogen_coord = torch.ones([batch_size, v_posed.shape[1], 1], dtype=dtype, device=device) + v_posed_homo = torch.cat([v_posed, homogen_coord], dim=2) + v_homo = torch.matmul(T, torch.unsqueeze(v_posed_homo, dim=-1)) + verts = v_homo[:, :, :3, 0] + + return verts, J_transformed + +def vertices2joints(J_regressor: Tensor, vertices: Tensor) -> Tensor: + ''' Calculates the 3D joint locations from the vertices + + Parameters + ---------- + J_regressor : torch.tensor JxV + The regressor array that is used to calculate the joints from the + position of the vertices + vertices : torch.tensor BxVx3 + The tensor of mesh vertices + + Returns + ------- + torch.tensor BxJx3 + The location of the joints + ''' + + return torch.einsum('bik,ji->bjk', [vertices, J_regressor]) + + +def blend_shapes(betas: Tensor, shape_disps: Tensor) -> Tensor: + ''' Calculates the per vertex displacement due to the blend shapes + + + Parameters + ---------- + betas : torch.tensor Bx(num_betas) + Blend shape coefficients + shape_disps: torch.tensor Vx3x(num_betas) + Blend shapes + + Returns + ------- + torch.tensor BxVx3 + The per-vertex displacement due to shape deformation + ''' + + # Displacement[b, m, k] = sum_{l} betas[b, l] * shape_disps[m, k, l] + # i.e. Multiply each shape displacement by its corresponding beta and + # then sum them. + blend_shape = torch.einsum('bl,mkl->bmk', [betas, shape_disps]) + return blend_shape + + +def batch_rodrigues( + rot_vecs: Tensor, + epsilon: float = 1e-8, +) -> Tensor: + ''' Calculates the rotation matrices for a batch of rotation vectors + Parameters + ---------- + rot_vecs: torch.tensor Nx3 + array of N axis-angle vectors + Returns + ------- + R: torch.tensor Nx3x3 + The rotation matrices for the given axis-angle parameters + ''' + + batch_size = rot_vecs.shape[0] + device, dtype = rot_vecs.device, rot_vecs.dtype + + angle = torch.norm(rot_vecs + 1e-8, dim=1, keepdim=True) + rot_dir = rot_vecs / angle + + cos = torch.unsqueeze(torch.cos(angle), dim=1) + sin = torch.unsqueeze(torch.sin(angle), dim=1) + + # Bx1 arrays + rx, ry, rz = torch.split(rot_dir, 1, dim=1) + K = torch.zeros((batch_size, 3, 3), dtype=dtype, device=device) + + zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device) + K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \ + .view((batch_size, 3, 3)) + + ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0) + rot_mat = ident + sin * K + (1 - cos) * torch.bmm(K, K) + return rot_mat + + +def transform_mat(R: Tensor, t: Tensor) -> Tensor: + ''' Creates a batch of transformation matrices + Args: + - R: Bx3x3 array of a batch of rotation matrices + - t: Bx3x1 array of a batch of translation vectors + Returns: + - T: Bx4x4 Transformation matrix + ''' + # No padding left or right, only add an extra row + return torch.cat([F.pad(R, [0, 0, 0, 1]), + F.pad(t, [0, 0, 0, 1], value=1)], dim=2) + + +def batch_rigid_transform( + rot_mats: Tensor, + joints: Tensor, + parents: Tensor, + dtype=torch.float32 +) -> Tensor: + """ + Applies a batch of rigid transformations to the joints + + Parameters + ---------- + rot_mats : torch.tensor BxNx3x3 + Tensor of rotation matrices + joints : torch.tensor BxNx3 + Locations of joints + parents : torch.tensor BxN + The kinematic tree 
of each object + dtype : torch.dtype, optional: + The data type of the created tensors, the default is torch.float32 + + Returns + ------- + posed_joints : torch.tensor BxNx3 + The locations of the joints after applying the pose rotations + rel_transforms : torch.tensor BxNx4x4 + The relative (with respect to the root joint) rigid transformations + for all the joints + """ + + joints = torch.unsqueeze(joints, dim=-1) + + rel_joints = joints.clone() + rel_joints[:, 1:] -= joints[:, parents[1:]] + + transforms_mat = transform_mat( + rot_mats.reshape(-1, 3, 3), + rel_joints.reshape(-1, 3, 1)).reshape(-1, joints.shape[1], 4, 4) + + transform_chain = [transforms_mat[:, 0]] + for i in range(1, parents.shape[0]): + # Subtract the joint location at the rest pose + # No need for rotation, since it's identity when at rest + curr_res = torch.matmul(transform_chain[parents[i]], + transforms_mat[:, i]) + transform_chain.append(curr_res) + + transforms = torch.stack(transform_chain, dim=1) + + # The last column of the transformations contains the posed joints + posed_joints = transforms[:, :, :3, 3] + + joints_homogen = F.pad(joints, [0, 0, 0, 1]) + + rel_transforms = transforms - F.pad( + torch.matmul(transforms, joints_homogen), [3, 0, 0, 0, 0, 0, 0, 0]) + + return posed_joints, rel_transforms diff --git a/utils/smplx/smplx/utils.py b/utils/smplx/smplx/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..91dcb14b7db742255cd7f8a1d5d1a65b0c997c51 --- /dev/null +++ b/utils/smplx/smplx/utils.py @@ -0,0 +1,126 @@ +# -*- coding: utf-8 -*- + +# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is +# holder of all proprietary rights on this computer program. +# You can only use this computer program if you have closed +# a license agreement with MPG or you get the right to use the computer +# program from someone who is authorized to grant you that right. +# Any use of the computer program without a valid license is prohibited and +# liable to prosecution. +# +# Copyright©2019 Max-Planck-Gesellschaft zur Förderung +# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute +# for Intelligent Systems. All rights reserved. 
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+from typing import NewType, Union, Optional
+from dataclasses import dataclass, asdict, fields
+import numpy as np
+import torch
+
+Tensor = NewType('Tensor', torch.Tensor)
+Array = NewType('Array', np.ndarray)
+
+
+@dataclass
+class ModelOutput:
+    vertices: Optional[Tensor] = None
+    joints: Optional[Tensor] = None
+    full_pose: Optional[Tensor] = None
+    global_orient: Optional[Tensor] = None
+    transl: Optional[Tensor] = None
+
+    def __getitem__(self, key):
+        return getattr(self, key)
+
+    def get(self, key, default=None):
+        return getattr(self, key, default)
+
+    def __iter__(self):
+        return self.keys()
+
+    def keys(self):
+        keys = [t.name for t in fields(self)]
+        return iter(keys)
+
+    def values(self):
+        values = [getattr(self, t.name) for t in fields(self)]
+        return iter(values)
+
+    def items(self):
+        data = [(t.name, getattr(self, t.name)) for t in fields(self)]
+        return iter(data)
+
+
+@dataclass
+class SMPLOutput(ModelOutput):
+    betas: Optional[Tensor] = None
+    body_pose: Optional[Tensor] = None
+
+
+@dataclass
+class SMPLHOutput(SMPLOutput):
+    left_hand_pose: Optional[Tensor] = None
+    right_hand_pose: Optional[Tensor] = None
+    transl: Optional[Tensor] = None
+
+
+@dataclass
+class SMPLXOutput(SMPLHOutput):
+    expression: Optional[Tensor] = None
+    jaw_pose: Optional[Tensor] = None
+    faces: Optional[Tensor] = None
+
+
+@dataclass
+class MANOOutput(ModelOutput):
+    betas: Optional[Tensor] = None
+    hand_pose: Optional[Tensor] = None
+
+
+@dataclass
+class FLAMEOutput(ModelOutput):
+    betas: Optional[Tensor] = None
+    expression: Optional[Tensor] = None
+    jaw_pose: Optional[Tensor] = None
+    neck_pose: Optional[Tensor] = None
+
+
+def find_joint_kin_chain(joint_id, kinematic_tree):
+    kin_chain = []
+    curr_idx = joint_id
+    while curr_idx != -1:
+        kin_chain.append(curr_idx)
+        curr_idx = kinematic_tree[curr_idx]
+    return kin_chain
+
+
+def to_tensor(
+    array: Union[Array, Tensor], dtype=torch.float32
+) -> Tensor:
+    if torch.is_tensor(array):
+        return array
+    else:
+        return torch.tensor(array, dtype=dtype)
+
+
+class Struct(object):
+    def __init__(self, **kwargs):
+        for key, val in kwargs.items():
+            setattr(self, key, val)
+
+
+def to_np(array, dtype=np.float32):
+    if 'scipy.sparse' in str(type(array)):
+        array = array.todense()
+    return np.array(array, dtype=dtype)
+
+
+def rot_mat_to_euler(rot_mats):
+    # Extracts the rotation about the y-axis from a rotation matrix.
+    # Careful with extreme cases of Euler angles like [0.0, pi, 0.0]
+
+    sy = torch.sqrt(rot_mats[:, 0, 0] * rot_mats[:, 0, 0] +
+                    rot_mats[:, 1, 0] * rot_mats[:, 1, 0])
+    return torch.atan2(-rot_mats[:, 2, 0], sy)
diff --git a/utils/smplx/smplx/vertex_ids.py b/utils/smplx/smplx/vertex_ids.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e7a4c36700f002da54a9e181eabbd47af2a95bc
--- /dev/null
+++ b/utils/smplx/smplx/vertex_ids.py
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+# +# Contact: ps-license@tuebingen.mpg.de + +from __future__ import print_function +from __future__ import absolute_import +from __future__ import division + +# Joint name to vertex mapping. SMPL/SMPL-H/SMPL-X vertices that correspond to +# MSCOCO and OpenPose joints +vertex_ids = { + 'smplh': { + 'nose': 332, + 'reye': 6260, + 'leye': 2800, + 'rear': 4071, + 'lear': 583, + 'rthumb': 6191, + 'rindex': 5782, + 'rmiddle': 5905, + 'rring': 6016, + 'rpinky': 6133, + 'lthumb': 2746, + 'lindex': 2319, + 'lmiddle': 2445, + 'lring': 2556, + 'lpinky': 2673, + 'LBigToe': 3216, + 'LSmallToe': 3226, + 'LHeel': 3387, + 'RBigToe': 6617, + 'RSmallToe': 6624, + 'RHeel': 6787 + }, + 'smplx': { + 'nose': 9120, + 'reye': 9929, + 'leye': 9448, + 'rear': 616, + 'lear': 6, + 'rthumb': 8079, + 'rindex': 7669, + 'rmiddle': 7794, + 'rring': 7905, + 'rpinky': 8022, + 'lthumb': 5361, + 'lindex': 4933, + 'lmiddle': 5058, + 'lring': 5169, + 'lpinky': 5286, + 'LBigToe': 5770, + 'LSmallToe': 5780, + 'LHeel': 8846, + 'RBigToe': 8463, + 'RSmallToe': 8474, + 'RHeel': 8635 + }, + 'mano': { + 'thumb': 744, + 'index': 320, + 'middle': 443, + 'ring': 554, + 'pinky': 671, + } +} diff --git a/utils/smplx/smplx/vertex_joint_selector.py b/utils/smplx/smplx/vertex_joint_selector.py new file mode 100644 index 0000000000000000000000000000000000000000..4b8298bd5e087731f86c1c699703b5219e046c5c --- /dev/null +++ b/utils/smplx/smplx/vertex_joint_selector.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- + +# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is +# holder of all proprietary rights on this computer program. +# You can only use this computer program if you have closed +# a license agreement with MPG or you get the right to use the computer +# program from someone who is authorized to grant you that right. +# Any use of the computer program without a valid license is prohibited and +# liable to prosecution. +# +# Copyright©2019 Max-Planck-Gesellschaft zur Förderung +# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute +# for Intelligent Systems. All rights reserved. 
+# +# Contact: ps-license@tuebingen.mpg.de + +from __future__ import absolute_import +from __future__ import print_function +from __future__ import division + +import numpy as np + +import torch +import torch.nn as nn + +from .utils import to_tensor + + +class VertexJointSelector(nn.Module): + + def __init__(self, vertex_ids=None, + use_hands=True, + use_feet_keypoints=True, **kwargs): + super(VertexJointSelector, self).__init__() + + extra_joints_idxs = [] + + face_keyp_idxs = np.array([ + vertex_ids['nose'], + vertex_ids['reye'], + vertex_ids['leye'], + vertex_ids['rear'], + vertex_ids['lear']], dtype=np.int64) + + extra_joints_idxs = np.concatenate([extra_joints_idxs, + face_keyp_idxs]) + + if use_feet_keypoints: + feet_keyp_idxs = np.array([vertex_ids['LBigToe'], + vertex_ids['LSmallToe'], + vertex_ids['LHeel'], + vertex_ids['RBigToe'], + vertex_ids['RSmallToe'], + vertex_ids['RHeel']], dtype=np.int32) + + extra_joints_idxs = np.concatenate( + [extra_joints_idxs, feet_keyp_idxs]) + + if use_hands: + self.tip_names = ['thumb', 'index', 'middle', 'ring', 'pinky'] + + tips_idxs = [] + for hand_id in ['l', 'r']: + for tip_name in self.tip_names: + tips_idxs.append(vertex_ids[hand_id + tip_name]) + + extra_joints_idxs = np.concatenate( + [extra_joints_idxs, tips_idxs]) + + self.register_buffer('extra_joints_idxs', + to_tensor(extra_joints_idxs, dtype=torch.long)) + + def forward(self, vertices, joints): + extra_joints = torch.index_select(vertices, 1, self.extra_joints_idxs) + joints = torch.cat([joints, extra_joints], dim=1) + + return joints diff --git a/utils/smplx/tools/README.md b/utils/smplx/tools/README.md new file mode 100644 index 0000000000000000000000000000000000000000..dd58ca9a94d841e81738ee22a29377c3b115bed7 --- /dev/null +++ b/utils/smplx/tools/README.md @@ -0,0 +1,20 @@ +## Removing Chumpy objects + +In a Python 2 virtual environment with [Chumpy](https://github.com/mattloper/chumpy) installed run the following to remove any Chumpy objects from the model data: + +```bash +python tools/clean_ch.py --input-models path-to-models/*.pkl --output-folder output-folder +``` + +## Merging SMPL-H and MANO parameters + +In order to use the given PyTorch SMPL-H module we first need to merge the SMPL-H and MANO parameters in a single file. After agreeing to the license and downloading the models, run the following command: + +```bash +python tools/merge_smplh_mano.py --smplh-fn SMPLH_FOLDER/SMPLH_GENDER.pkl \ + --mano-left-fn MANO_FOLDER/MANO_LEFT.pkl \ + --mano-right-fn MANO_FOLDER/MANO_RIGHT.pkl \ + --output-folder OUTPUT_FOLDER +``` + +where SMPLH_FOLDER is the folder with the SMPL-H files and MANO_FOLDER the one for the MANO files. diff --git a/utils/smplx/tools/__init__.py b/utils/smplx/tools/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..098b529b7f169758710ab788be94fe5d83e51256 --- /dev/null +++ b/utils/smplx/tools/__init__.py @@ -0,0 +1,19 @@ +# -*- coding: utf-8 -*- + +# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is +# holder of all proprietary rights on this computer program. +# You can only use this computer program if you have closed +# a license agreement with MPG or you get the right to use the computer +# program from someone who is authorized to grant you that right. +# Any use of the computer program without a valid license is prohibited and +# liable to prosecution. +# +# Copyright©2019 Max-Planck-Gesellschaft zur Förderung +# der Wissenschaften e.V. (MPG). 
acting on behalf of its Max Planck Institute +# for Intelligent Systems and the Max Planck Institute for Biological +# Cybernetics. All rights reserved. +# +# Contact: ps-license@tuebingen.mpg.de + +import clean_ch +import merge_smplh_mano diff --git a/utils/smplx/tools/clean_ch.py b/utils/smplx/tools/clean_ch.py new file mode 100644 index 0000000000000000000000000000000000000000..56874b374c5d25aeb4ace0aefb3570bd7b891c22 --- /dev/null +++ b/utils/smplx/tools/clean_ch.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- + +# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is +# holder of all proprietary rights on this computer program. +# You can only use this computer program if you have closed +# a license agreement with MPG or you get the right to use the computer +# program from someone who is authorized to grant you that right. +# Any use of the computer program without a valid license is prohibited and +# liable to prosecution. +# +# Copyright©2019 Max-Planck-Gesellschaft zur Förderung +# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute +# for Intelligent Systems and the Max Planck Institute for Biological +# Cybernetics. All rights reserved. +# +# Contact: ps-license@tuebingen.mpg.de + +from __future__ import print_function +from __future__ import absolute_import +from __future__ import division + +import argparse +import os +import os.path as osp + +import pickle + +from tqdm import tqdm +import numpy as np + + +def clean_fn(fn, output_folder='output'): + with open(fn, 'rb') as body_file: + body_data = pickle.load(body_file) + + output_dict = {} + for key, data in body_data.iteritems(): + if 'chumpy' in str(type(data)): + output_dict[key] = np.array(data) + else: + output_dict[key] = data + + out_fn = osp.split(fn)[1] + + out_path = osp.join(output_folder, out_fn) + with open(out_path, 'wb') as out_file: + pickle.dump(output_dict, out_file) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--input-models', dest='input_models', nargs='+', + required=True, type=str, + help='The path to the model that will be processed') + parser.add_argument('--output-folder', dest='output_folder', + required=True, type=str, + help='The path to the output folder') + + args = parser.parse_args() + + input_models = args.input_models + output_folder = args.output_folder + if not osp.exists(output_folder): + print('Creating directory: {}'.format(output_folder)) + os.makedirs(output_folder) + + for input_model in input_models: + clean_fn(input_model, output_folder=output_folder) diff --git a/utils/smplx/tools/merge_smplh_mano.py b/utils/smplx/tools/merge_smplh_mano.py new file mode 100644 index 0000000000000000000000000000000000000000..eab9d1ea60c224cf3785bd90dc542569ad81cd78 --- /dev/null +++ b/utils/smplx/tools/merge_smplh_mano.py @@ -0,0 +1,89 @@ +# -*- coding: utf-8 -*- + +# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is +# holder of all proprietary rights on this computer program. +# You can only use this computer program if you have closed +# a license agreement with MPG or you get the right to use the computer +# program from someone who is authorized to grant you that right. +# Any use of the computer program without a valid license is prohibited and +# liable to prosecution. +# +# Copyright©2019 Max-Planck-Gesellschaft zur Förderung +# der Wissenschaften e.V. (MPG). 
acting on behalf of its Max Planck Institute +# for Intelligent Systems and the Max Planck Institute for Biological +# Cybernetics. All rights reserved. +# +# Contact: ps-license@tuebingen.mpg.de + +from __future__ import print_function + +import os +import os.path as osp +import pickle + +import argparse + +import numpy as np + + +def merge_models(smplh_fn, mano_left_fn, mano_right_fn, + output_folder='output'): + + with open(smplh_fn, 'rb') as body_file: + body_data = pickle.load(body_file) + + with open(mano_left_fn, 'rb') as lhand_file: + lhand_data = pickle.load(lhand_file) + + with open(mano_right_fn, 'rb') as rhand_file: + rhand_data = pickle.load(rhand_file) + + out_fn = osp.split(smplh_fn)[1] + + output_data = body_data.copy() + output_data['hands_componentsl'] = lhand_data['hands_components'] + output_data['hands_componentsr'] = rhand_data['hands_components'] + + output_data['hands_coeffsl'] = lhand_data['hands_coeffs'] + output_data['hands_coeffsr'] = rhand_data['hands_coeffs'] + + output_data['hands_meanl'] = lhand_data['hands_mean'] + output_data['hands_meanr'] = rhand_data['hands_mean'] + + for key, data in output_data.iteritems(): + if 'chumpy' in str(type(data)): + output_data[key] = np.array(data) + else: + output_data[key] = data + + out_path = osp.join(output_folder, out_fn) + print(out_path) + print('Saving to {}'.format(out_path)) + with open(out_path, 'wb') as output_file: + pickle.dump(output_data, output_file) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--smplh-fn', dest='smplh_fn', required=True, + type=str, help='The path to the SMPLH model') + parser.add_argument('--mano-left-fn', dest='mano_left_fn', required=True, + type=str, help='The path to the left hand MANO model') + parser.add_argument('--mano-right-fn', dest='mano_right_fn', required=True, + type=str, help='The path to the right hand MANO model') + parser.add_argument('--output-folder', dest='output_folder', + required=True, type=str, + help='The path to the output folder') + + args = parser.parse_args() + + smplh_fn = args.smplh_fn + mano_left_fn = args.mano_left_fn + mano_right_fn = args.mano_right_fn + output_folder = args.output_folder + + if not osp.exists(output_folder): + print('Creating directory: {}'.format(output_folder)) + os.makedirs(output_folder) + + merge_models(smplh_fn, mano_left_fn, mano_right_fn, output_folder) diff --git a/utils/transforms.py b/utils/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..a721aadc8057d3027d30d9be3c3f6b7cce9a3ab4 --- /dev/null +++ b/utils/transforms.py @@ -0,0 +1,160 @@ +import torch +import numpy as np +import scipy +# from config import cfg +from torch.nn import functional as F +import torchgeometry as tgm + +def cam2pixel(cam_coord, f, c): + x = cam_coord[:,0] / cam_coord[:,2] * f[0] + c[0] + y = cam_coord[:,1] / cam_coord[:,2] * f[1] + c[1] + z = cam_coord[:,2] + return np.stack((x,y,z),1) + +def pixel2cam(pixel_coord, f, c): + x = (pixel_coord[:,0] - c[0]) / f[0] * pixel_coord[:,2] + y = (pixel_coord[:,1] - c[1]) / f[1] * pixel_coord[:,2] + z = pixel_coord[:,2] + return np.stack((x,y,z),1) + +def world2cam(world_coord, R, t): + cam_coord = np.dot(R, world_coord.transpose(1,0)).transpose(1,0) + t.reshape(1,3) + return cam_coord + +def cam2world(cam_coord, R, t): + world_coord = np.dot(np.linalg.inv(R), (cam_coord - t.reshape(1,3)).transpose(1,0)).transpose(1,0) + return world_coord + +def rigid_transform_3D(A, B): + n, dim = A.shape + centroid_A = np.mean(A, axis = 0) + 
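# Umeyama-style similarity alignment: center both point sets, then recover +
# the rotation from the SVD of their 3x3 cross-covariance H (formed below). +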
centroid_B = np.mean(B, axis = 0) + H = np.dot(np.transpose(A - centroid_A), B - centroid_B) / n + U, s, V = np.linalg.svd(H) + R = np.dot(np.transpose(V), np.transpose(U)) + if np.linalg.det(R) < 0: + s[-1] = -s[-1] + V[2] = -V[2] + R = np.dot(np.transpose(V), np.transpose(U)) + + varP = np.var(A, axis=0).sum() + c = 1/varP * np.sum(s) + + t = -np.dot(c*R, np.transpose(centroid_A)) + np.transpose(centroid_B) + return c, R, t + +def rigid_align(A, B): + c, R, t = rigid_transform_3D(A, B) + A2 = np.transpose(np.dot(c*R, np.transpose(A))) + t + return A2 + +def transform_joint_to_other_db(src_joint, src_name, dst_name): + src_joint_num = len(src_name) + dst_joint_num = len(dst_name) + + new_joint = np.zeros(((dst_joint_num,) + src_joint.shape[1:]), dtype=np.float32) + for src_idx in range(len(src_name)): + name = src_name[src_idx] + if name in dst_name: + dst_idx = dst_name.index(name) + new_joint[dst_idx] = src_joint[src_idx] + + return new_joint + +def rot6d_to_axis_angle(x): + batch_size = x.shape[0] + + x = x.view(-1,3,2) + a1 = x[:, :, 0] + a2 = x[:, :, 1] + b1 = F.normalize(a1) + b2 = F.normalize(a2 - torch.einsum('bi,bi->b', b1, a2).unsqueeze(-1) * b1) + b3 = torch.cross(b1, b2) + rot_mat = torch.stack((b1, b2, b3), dim=-1) # 3x3 rotation matrix + + rot_mat = torch.cat([rot_mat,torch.zeros((batch_size,3,1)).cuda().float()],2) # 3x4 rotation matrix + axis_angle = tgm.rotation_matrix_to_angle_axis(rot_mat).reshape(-1,3) # axis-angle + axis_angle[torch.isnan(axis_angle)] = 0.0 + return axis_angle + +def sample_joint_features(img_feat, joint_xy): + height, width = img_feat.shape[2:] + x = joint_xy[:,:,0] / (width-1) * 2 - 1 + y = joint_xy[:,:,1] / (height-1) * 2 - 1 + grid = torch.stack((x,y),2)[:,:,None,:] + img_feat = F.grid_sample(img_feat, grid, align_corners=True)[:,:,:,0] # batch_size, channel_dim, joint_num + img_feat = img_feat.permute(0,2,1).contiguous() # batch_size, joint_num, channel_dim + return img_feat + + +def soft_argmax_2d(heatmap2d): + batch_size = heatmap2d.shape[0] + height, width = heatmap2d.shape[2:] + heatmap2d = heatmap2d.reshape((batch_size, -1, height*width)) + heatmap2d = F.softmax(heatmap2d, 2) + heatmap2d = heatmap2d.reshape((batch_size, -1, height, width)) + + accu_x = heatmap2d.sum(dim=(2)) + accu_y = heatmap2d.sum(dim=(3)) + + accu_x = accu_x * torch.arange(width).float().cuda()[None,None,:] + accu_y = accu_y * torch.arange(height).float().cuda()[None,None,:] + + accu_x = accu_x.sum(dim=2, keepdim=True) + accu_y = accu_y.sum(dim=2, keepdim=True) + + coord_out = torch.cat((accu_x, accu_y), dim=2) + return coord_out + +def soft_argmax_3d(heatmap3d): + batch_size = heatmap3d.shape[0] + depth, height, width = heatmap3d.shape[2:] + heatmap3d = heatmap3d.reshape((batch_size, -1, depth*height*width)) + heatmap3d = F.softmax(heatmap3d, 2) + heatmap3d = heatmap3d.reshape((batch_size, -1, depth, height, width)) + + accu_x = heatmap3d.sum(dim=(2,3)) + accu_y = heatmap3d.sum(dim=(2,4)) + accu_z = heatmap3d.sum(dim=(3,4)) + + accu_x = accu_x * torch.arange(width).float().cuda()[None,None,:] + accu_y = accu_y * torch.arange(height).float().cuda()[None,None,:] + accu_z = accu_z * torch.arange(depth).float().cuda()[None,None,:] + + accu_x = accu_x.sum(dim=2, keepdim=True) + accu_y = accu_y.sum(dim=2, keepdim=True) + accu_z = accu_z.sum(dim=2, keepdim=True) + + coord_out = torch.cat((accu_x, accu_y, accu_z), dim=2) + return coord_out + +def restore_bbox(bbox_center, bbox_size, aspect_ratio, extension_ratio): + bbox = bbox_center.view(-1,1,2) + 
torch.cat((-bbox_size.view(-1,1,2)/2., bbox_size.view(-1,1,2)/2.),1) # xyxy in (cfg.output_hm_shape[2], cfg.output_hm_shape[1]) space + bbox[:,:,0] = bbox[:,:,0] / cfg.output_hm_shape[2] * cfg.input_body_shape[1] + bbox[:,:,1] = bbox[:,:,1] / cfg.output_hm_shape[1] * cfg.input_body_shape[0] + bbox = bbox.view(-1,4) + + # xyxy -> xywh + bbox[:,2] = bbox[:,2] - bbox[:,0] + bbox[:,3] = bbox[:,3] - bbox[:,1] + + # aspect ratio preserving bbox + w = bbox[:,2] + h = bbox[:,3] + c_x = bbox[:,0] + w/2. + c_y = bbox[:,1] + h/2. + + mask1 = w > (aspect_ratio * h) + mask2 = w < (aspect_ratio * h) + h[mask1] = w[mask1] / aspect_ratio + w[mask2] = h[mask2] * aspect_ratio + + bbox[:,2] = w*extension_ratio + bbox[:,3] = h*extension_ratio + bbox[:,0] = c_x - bbox[:,2]/2. + bbox[:,1] = c_y - bbox[:,3]/2. + + # xywh -> xyxy + bbox[:,2] = bbox[:,2] + bbox[:,0] + bbox[:,3] = bbox[:,3] + bbox[:,1] + return bbox diff --git a/utils/utils_model.py b/utils/utils_model.py new file mode 100644 index 0000000000000000000000000000000000000000..c81294537fac1d3c3642755dacd3800ceb32505f --- /dev/null +++ b/utils/utils_model.py @@ -0,0 +1,82 @@ +import numpy as np +import torch +import torch.optim as optim +import logging +import os +import sys + +def getCi(accLog): + + mean = np.mean(accLog) + std = np.std(accLog) + ci95 = 1.96*std/np.sqrt(len(accLog)) + + return mean, ci95 + +def get_logger(out_dir): + logger = logging.getLogger('Exp') + logger.setLevel(logging.INFO) + formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s") + + file_path = os.path.join(out_dir, "run.log") + file_hdlr = logging.FileHandler(file_path) + file_hdlr.setFormatter(formatter) + + strm_hdlr = logging.StreamHandler(sys.stdout) + strm_hdlr.setFormatter(formatter) + + logger.addHandler(file_hdlr) + logger.addHandler(strm_hdlr) + return logger + + +def initial_optim(decay_option, lr, weight_decay, net, optimizer) : + + if optimizer == 'adamw' : + optimizer_adam_family = optim.AdamW + elif optimizer == 'adam' : + optimizer_adam_family = optim.Adam + if decay_option == 'all': + optimizer = optimizer_adam_family(net.parameters(), lr=lr, betas=(0.9, 0.99), weight_decay=weight_decay) + + else: + raise NotImplementedError + + + return optimizer + + +def initial_optim_with_eps(decay_option, lr, weight_decay, net, optimizer, eps) : + + if optimizer == 'adamw' : + optimizer_adam_family = optim.AdamW + elif optimizer == 'adam' : + optimizer_adam_family = optim.Adam + if decay_option == 'all': + + optimizer = optimizer_adam_family(net.parameters(), lr=lr, betas=(0.9, 0.99), weight_decay=weight_decay, eps=eps) + + elif decay_option == 'noVQ': + all_params = set(net.parameters()) + no_decay = set([net.vq_layer]) + + decay = all_params - no_decay + optimizer = optimizer_adam_family([ + {'params': list(no_decay), 'weight_decay': 0}, + {'params': list(decay), 'weight_decay' : weight_decay}], lr=lr, eps=eps) + + return optimizer + + +def get_motion_with_trans(motion, velocity) : + ''' + motion : torch.tensor, shape (batch_size, T, 72), with the global translation = 0 + velocity : torch.tensor, shape (batch_size, T, 3), contain the information of velocity = 0 + + ''' + trans = torch.cumsum(velocity, dim=1) + trans = trans - trans[:, :1] + trans = trans.repeat((1, 1, 21)) + motion_with_trans = motion + trans + return motion_with_trans + \ No newline at end of file diff --git a/visualization/plot_3d_global.py b/visualization/plot_3d_global.py new file mode 100644 index 0000000000000000000000000000000000000000..672b84eb43817ada21363d9ec1a907dcf1aa50b7 --- 
/dev/null +++ b/visualization/plot_3d_global.py @@ -0,0 +1,129 @@ +import torch +import matplotlib.pyplot as plt +import numpy as np +import io +import matplotlib +from mpl_toolkits.mplot3d.art3d import Poly3DCollection +import mpl_toolkits.mplot3d.axes3d as p3 +from textwrap import wrap +import imageio + +def plot_3d_motion(args, figsize=(10, 10), fps=120, radius=4): + matplotlib.use('Agg') + + + joints, out_name, title = args + + data = joints.copy().reshape(len(joints), -1, 3) + + nb_joints = joints.shape[1] + smpl_kinetic_chain = [[0, 11, 12, 13, 14, 15], [0, 16, 17, 18, 19, 20], [0, 1, 2, 3, 4], [3, 5, 6, 7], [3, 8, 9, 10]] if nb_joints == 21 else [[0, 2, 5, 8, 11], [0, 1, 4, 7, 10], [0, 3, 6, 9, 12, 15], [9, 14, 17, 19, 21], [9, 13, 16, 18, 20]] + limits = 1000 if nb_joints == 21 else 2 + MINS = data.min(axis=0).min(axis=0) + MAXS = data.max(axis=0).max(axis=0) + colors = ['red', 'blue', 'black', 'red', 'blue', + 'darkblue', 'darkblue', 'darkblue', 'darkblue', 'darkblue', + 'darkred', 'darkred', 'darkred', 'darkred', 'darkred'] + frame_number = data.shape[0] + + height_offset = MINS[1] + data[:, :, 1] -= height_offset + trajec = data[:, 0, [0, 2]] + + data[..., 0] -= data[:, 0:1, 0] + data[..., 2] -= data[:, 0:1, 2] + + def update(index): + + def init(): + ax.set_xlim(-limits, limits) + ax.set_ylim(-limits, limits) + ax.set_zlim(0, limits) + ax.grid(b=False) + def plot_xzPlane(minx, maxx, miny, minz, maxz): + ## Plot a plane XZ + verts = [ + [minx, miny, minz], + [minx, miny, maxz], + [maxx, miny, maxz], + [maxx, miny, minz] + ] + xz_plane = Poly3DCollection([verts]) + xz_plane.set_facecolor((0.5, 0.5, 0.5, 0.5)) + ax.add_collection3d(xz_plane) + fig = plt.figure(figsize=(480/96., 320/96.), dpi=96) if nb_joints == 21 else plt.figure(figsize=(10, 10), dpi=96) + if title is not None : + wraped_title = '\n'.join(wrap(title, 40)) + fig.suptitle(wraped_title, fontsize=16) + ax = p3.Axes3D(fig, auto_add_to_figure=False) + fig.add_axes(ax) + + init() + + ax.lines = [] + ax.collections = [] + ax.view_init(elev=110, azim=-90) + ax.dist = 7.5 + # ax = + plot_xzPlane(MINS[0] - trajec[index, 0], MAXS[0] - trajec[index, 0], 0, MINS[2] - trajec[index, 1], + MAXS[2] - trajec[index, 1]) + # ax.scatter(data[index, :22, 0], data[index, :22, 1], data[index, :22, 2], color='black', s=3) + + if index > 1: + ax.plot3D(trajec[:index, 0] - trajec[index, 0], np.zeros_like(trajec[:index, 0]), + trajec[:index, 1] - trajec[index, 1], linewidth=1.0, + color='blue') + # ax = plot_xzPlane(ax, MINS[0], MAXS[0], 0, MINS[2], MAXS[2]) + + for i, (chain, color) in enumerate(zip(smpl_kinetic_chain, colors)): + # print(color) + if i < 5: + linewidth = 4.0 + else: + linewidth = 2.0 + ax.plot3D(data[index, chain, 0], data[index, chain, 1], data[index, chain, 2], linewidth=linewidth, + color=color) + # print(trajec[:index, 0].shape) + + plt.axis('off') + ax.set_xticklabels([]) + ax.set_yticklabels([]) + ax.set_zticklabels([]) + + if out_name is not None : + plt.savefig(out_name, dpi=96) + plt.close() + + else : + io_buf = io.BytesIO() + fig.savefig(io_buf, format='raw', dpi=96) + io_buf.seek(0) + # print(fig.bbox.bounds) + arr = np.reshape(np.frombuffer(io_buf.getvalue(), dtype=np.uint8), + newshape=(int(fig.bbox.bounds[3]), int(fig.bbox.bounds[2]), -1)) + io_buf.close() + plt.close() + return arr + + out = [] + for i in range(frame_number) : + out.append(update(i)) + out = np.stack(out, axis=0) + return torch.from_numpy(out) + + +def draw_to_batch(smpl_joints_batch, title_batch=None, outname=None, fps=30) : + batch_size = 
len(smpl_joints_batch) + out = [] + for i in range(batch_size) : + # out_name is passed as None, so plot_3d_motion returns the rendered frames instead of saving a figure + out.append(plot_3d_motion([smpl_joints_batch[i], None, title_batch[i] if title_batch is not None else None])) + if outname is not None: + imageio.mimsave(outname[i], np.array(out[-1]), fps=fps) + out = torch.stack(out, dim=0) + return out
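As a quick smoke test for `draw_to_batch`, the hypothetical snippet below feeds one random 22-joint sequence through it. Note that `plot_3d_motion` calls `Axes3D(fig, auto_add_to_figure=False)` and reassigns `ax.lines`/`ax.collections`, which appears to pin it to a matplotlib around 3.4 (where `auto_add_to_figure` exists but those attributes are still plain lists); on 3.5+ the assignments raise `AttributeError`. The random joints and the `demo.gif` output name are illustrative only.

```python
# Hypothetical smoke test for draw_to_batch; assumes a ~3.4-era matplotlib
# (plot_3d_motion reassigns ax.lines, which newer releases forbid).
import numpy as np
from visualization.plot_3d_global import draw_to_batch

# One sequence of 16 frames in the 22-joint layout that the kinematic
# chains in plot_3d_motion expect; values are random, for shape only.
batch = [np.random.rand(16, 22, 3).astype(np.float32) * 2 - 1]

frames = draw_to_batch(batch,
                       title_batch=['random test motion'],
                       outname=['demo.gif'],   # also writes a GIF per sequence
                       fps=30)
print(frames.shape)  # (1, 16, H, W, 4): RGBA frames stacked as a torch tensor
```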
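`rigid_transform_3D`/`rigid_align` in `utils/transforms.py` implement similarity Procrustes alignment (scale, rotation, translation), the operation typically used for PA-MPJPE. A minimal round-trip check is sketched below; it assumes `torchgeometry` and `scipy` are installed, since `transforms.py` imports them at module level, and the synthetic rotation, scale, and translation are arbitrary.

```python
# Round-trip check: a known similarity transform applied to random points
# should be undone by rigid_align, up to numerical precision.
import numpy as np
from utils.transforms import rigid_align  # module also imports torchgeometry, scipy

rng = np.random.default_rng(0)
A = rng.standard_normal((22, 3))          # e.g. 22 predicted joints

Q, _ = np.linalg.qr(rng.standard_normal((3, 3)))
if np.linalg.det(Q) < 0:                  # force a proper rotation (det = +1)
    Q[:, 0] = -Q[:, 0]
B = 1.7 * A @ Q.T + np.array([0.3, -0.2, 0.5])   # scale, rotate, translate

A2 = rigid_align(A, B)                    # Procrustes-align A onto B
print(np.abs(A2 - B).max())               # tiny, on the order of 1e-12
```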
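`restore_bbox` reads `cfg.output_hm_shape` and `cfg.input_body_shape`, but the `from config import cfg` line at the top of `transforms.py` is commented out, so calling it as shipped raises a `NameError`. One way to exercise it is to patch a stand-in config into the module, as in the sketch below; the attribute names are the ones the function actually reads, while the numeric values and ratios are illustrative placeholders, not values from this repo.

```python
# Stand-in cfg so restore_bbox can run; values are illustrative only.
from types import SimpleNamespace

import torch
import utils.transforms as T

T.cfg = SimpleNamespace(
    output_hm_shape=(8, 64, 64),    # (D, H, W) heatmap grid (assumed)
    input_body_shape=(256, 192),    # (H, W) network input size (assumed)
)

center = torch.tensor([[32.0, 32.0]])   # bbox center in heatmap coords
size = torch.tensor([[16.0, 24.0]])     # bbox width/height in heatmap coords
bbox = T.restore_bbox(center, size, aspect_ratio=0.75, extension_ratio=1.25)
print(bbox)   # xyxy box mapped into input-image coordinates
```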
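`get_motion_with_trans` in `utils/utils_model.py` integrates per-frame root velocity into a global translation and adds it to every joint channel. Its docstring says `motion` has shape `(batch_size, T, 72)`, but the translation is tiled 21 times (`trans.repeat((1, 1, 21))`, i.e. 21 joints × 3 = 63 channels), so only a 63-dim layout actually broadcasts; the sketch below assumes that layout.

```python
# CPU sketch of get_motion_with_trans with a constant velocity; a
# 63-channel (21 joints x 3) motion is assumed, as implied by the
# repeat((1, 1, 21)) in the function body.
import torch
from utils.utils_model import get_motion_with_trans

B, T, J = 2, 30, 21
motion = torch.zeros(B, T, J * 3)            # root-relative joint positions
velocity = torch.full((B, T, 3), 0.01)       # constant drift per frame

out = get_motion_with_trans(motion, velocity)
print(out.shape)        # torch.Size([2, 30, 63])
# Translation is cumsum(velocity) re-zeroed at frame 0, so frame t sits
# at t * 0.01 on every axis:
print(out[0, -1, :3])   # tensor([0.2900, 0.2900, 0.2900])
```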