diff --git "a/2024/Semantics-aware Motion Retargeting with Vision-Language Models/layout.json" "b/2024/Semantics-aware Motion Retargeting with Vision-Language Models/layout.json"
new file mode 100644--- /dev/null
+++ "b/2024/Semantics-aware Motion Retargeting with Vision-Language Models/layout.json"
@@ -0,0 +1,7808 @@
+{
+ "pdf_info": [
+ {
+ "para_blocks": [
+ {
+ "bbox": [
+ 87,
+ 103,
+ 506,
+ 121
+ ],
+ "type": "title",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 87,
+ 103,
+ 506,
+ 121
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 87,
+ 103,
+ 506,
+ 121
+ ],
+ "type": "text",
+ "content": "Semantics-aware Motion Retargeting with Vision-Language Models"
+ }
+ ]
+ }
+ ],
+ "index": 2
+ },
+ {
+ "bbox": [
+ 108,
+ 142,
+ 485,
+ 186
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 108,
+ 142,
+ 485,
+ 186
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 108,
+ 142,
+ 485,
+ 186
+ ],
+ "type": "text",
+ "content": "Haodong Zhang"
+ },
+ {
+ "bbox": [
+ 108,
+ 142,
+ 485,
+ 186
+ ],
+ "type": "inline_equation",
+ "content": "^{1*}"
+ },
+ {
+ "bbox": [
+ 108,
+ 142,
+ 485,
+ 186
+ ],
+ "type": "text",
+ "content": " Zhike Chen"
+ },
+ {
+ "bbox": [
+ 108,
+ 142,
+ 485,
+ 186
+ ],
+ "type": "inline_equation",
+ "content": "^{1*}"
+ },
+ {
+ "bbox": [
+ 108,
+ 142,
+ 485,
+ 186
+ ],
+ "type": "text",
+ "content": " Haocheng Xu"
+ },
+ {
+ "bbox": [
+ 108,
+ 142,
+ 485,
+ 186
+ ],
+ "type": "inline_equation",
+ "content": "^{1}"
+ },
+ {
+ "bbox": [
+ 108,
+ 142,
+ 485,
+ 186
+ ],
+ "type": "text",
+ "content": " Lei Hao"
+ },
+ {
+ "bbox": [
+ 108,
+ 142,
+ 485,
+ 186
+ ],
+ "type": "inline_equation",
+ "content": "^{2}"
+ },
+ {
+ "bbox": [
+ 108,
+ 142,
+ 485,
+ 186
+ ],
+ "type": "text",
+ "content": " Xiaofei Wu"
+ },
+ {
+ "bbox": [
+ 108,
+ 142,
+ 485,
+ 186
+ ],
+ "type": "inline_equation",
+ "content": "^{2}"
+ },
+ {
+ "bbox": [
+ 108,
+ 142,
+ 485,
+ 186
+ ],
+ "type": "text",
+ "content": " Songcen Xu"
+ },
+ {
+ "bbox": [
+ 108,
+ 142,
+ 485,
+ 186
+ ],
+ "type": "inline_equation",
+ "content": "^{2}"
+ },
+ {
+ "bbox": [
+ 108,
+ 142,
+ 485,
+ 186
+ ],
+ "type": "text",
+ "content": " Zhensong Zhang"
+ },
+ {
+ "bbox": [
+ 108,
+ 142,
+ 485,
+ 186
+ ],
+ "type": "inline_equation",
+ "content": "^{2}"
+ },
+ {
+ "bbox": [
+ 108,
+ 142,
+ 485,
+ 186
+ ],
+ "type": "text",
+ "content": " Yue Wang"
+ },
+ {
+ "bbox": [
+ 108,
+ 142,
+ 485,
+ 186
+ ],
+ "type": "inline_equation",
+ "content": "^{1}"
+ },
+ {
+ "bbox": [
+ 108,
+ 142,
+ 485,
+ 186
+ ],
+ "type": "text",
+ "content": " Rong Xiong"
+ },
+ {
+ "bbox": [
+ 108,
+ 142,
+ 485,
+ 186
+ ],
+ "type": "inline_equation",
+ "content": "^{1\\dagger}"
+ },
+ {
+ "bbox": [
+ 108,
+ 142,
+ 485,
+ 186
+ ],
+ "type": "text",
+ "content": " Zhejiang University "
+ },
+ {
+ "bbox": [
+ 108,
+ 142,
+ 485,
+ 186
+ ],
+ "type": "inline_equation",
+ "content": "{}^{2}"
+ },
+ {
+ "bbox": [
+ 108,
+ 142,
+ 485,
+ 186
+ ],
+ "type": "text",
+ "content": "Huawei Noah's Ark Lab"
+ }
+ ]
+ }
+ ],
+ "index": 3
+ },
+ {
+ "bbox": [
+ 143,
+ 213,
+ 192,
+ 225
+ ],
+ "type": "title",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 143,
+ 213,
+ 192,
+ 225
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 143,
+ 213,
+ 192,
+ 225
+ ],
+ "type": "text",
+ "content": "Abstract"
+ }
+ ]
+ }
+ ],
+ "index": 4
+ },
+ {
+ "bbox": [
+ 46,
+ 238,
+ 290,
+ 478
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 46,
+ 238,
+ 290,
+ 478
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 46,
+ 238,
+ 290,
+ 478
+ ],
+ "type": "text",
+ "content": "Capturing and preserving motion semantics is essential to motion retargeting between animation characters. However, most of the previous works neglect the semantic information or rely on human-designed joint-level representations. Here, we present a novel Semantics-aware Motion reTargeting (SMT) method with the advantage of vision-language models to extract and maintain meaningful motion semantics. We utilize a differentiable module to render 3D motions. Then the high-level motion semantics are incorporated into the motion retargeting process by feeding the vision-language model with the rendered images and aligning the extracted semantic embeddings. To ensure the preservation of fine-grained motion details and high-level semantics, we adopt a two-stage pipeline consisting of skeleton-aware pre-training and fine-tuning with semantics and geometry constraints. Experimental results show the effectiveness of the proposed method in producing high-quality motion retargeting results while accurately preserving motion semantics. Project page can be found at https://sites.google.com/view/smtnet."
+ }
+ ]
+ }
+ ],
+ "index": 5
+ },
+ {
+ "bbox": [
+ 47,
+ 499,
+ 128,
+ 511
+ ],
+ "type": "title",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 47,
+ 499,
+ 128,
+ 511
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 47,
+ 499,
+ 128,
+ 511
+ ],
+ "type": "text",
+ "content": "1. Introduction"
+ }
+ ]
+ }
+ ],
+ "index": 6
+ },
+ {
+ "bbox": [
+ 46,
+ 519,
+ 287,
+ 650
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 46,
+ 519,
+ 287,
+ 650
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 46,
+ 519,
+ 287,
+ 650
+ ],
+ "type": "text",
+ "content": "3D animation characters have extensive application in animation production, virtual reality, and various other domains. These characters are animated using motion data, resulting in lifelike and immersive animations. Nevertheless, acquiring motion data for each character can be a costly endeavor. Therefore, the ability to retarget existing motion data for new characters holds immense importance. The goal of motion retargeting is to transfer existing motion data to new characters following motion feature extraction and integration processes, which ensure the preservation of the original motion's characteristics."
+ }
+ ]
+ }
+ ],
+ "index": 7
+ },
+ {
+ "bbox": [
+ 46,
+ 651,
+ 287,
+ 687
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 46,
+ 651,
+ 287,
+ 687
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 46,
+ 651,
+ 287,
+ 687
+ ],
+ "type": "text",
+ "content": "Semantics encompasses the meaningful and contextually relevant information conveyed in motion and plays a critical role in ensuring the realism and vividness of the anima"
+ }
+ ]
+ }
+ ],
+ "index": 8
+ },
+ {
+ "type": "image",
+ "bbox": [
+ 306,
+ 212,
+ 429,
+ 341
+ ],
+ "blocks": [
+ {
+ "bbox": [
+ 306,
+ 212,
+ 429,
+ 341
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 306,
+ 212,
+ 429,
+ 341
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 306,
+ 212,
+ 429,
+ 341
+ ],
+ "type": "image",
+ "image_path": "bd9e13161b93841733ced865f78598c1d6a977e14cdaba3f18310fb1838f555d.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 9,
+ "angle": 0,
+ "type": "image_body"
+ },
+ {
+ "bbox": [
+ 305,
+ 350,
+ 547,
+ 439
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 305,
+ 350,
+ 547,
+ 439
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 305,
+ 350,
+ 547,
+ 439
+ ],
+ "type": "text",
+ "content": "Figure 1. Comparison with previous motion retargeting methods. (a) Previous works rely on human-designed joint distance matrix [25] or self-contacts between mesh vertices [23] to ensure semantics preservation. (b) Ours work enforces human-level motion semantics consistency with the extensive knowledge of vision-language models. (c) Comparison of motion quality and semantics preservation on the Mixamo dataset [1]. Our method achieves the best motion quality and semantics consistency."
+ }
+ ]
+ }
+ ],
+ "index": 11,
+ "angle": 0,
+ "type": "image_caption"
+ }
+ ],
+ "index": 9
+ },
+ {
+ "type": "image",
+ "bbox": [
+ 431,
+ 213,
+ 545,
+ 341
+ ],
+ "blocks": [
+ {
+ "bbox": [
+ 431,
+ 213,
+ 545,
+ 341
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 431,
+ 213,
+ 545,
+ 341
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 431,
+ 213,
+ 545,
+ 341
+ ],
+ "type": "image",
+ "image_path": "e549d6320b67a13aa9b070cb0d500c4433aa93e45dd2ca4d66e68f52b2a81134.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 10,
+ "angle": 0,
+ "type": "image_body"
+ }
+ ],
+ "index": 10
+ },
+ {
+ "bbox": [
+ 304,
+ 459,
+ 546,
+ 662
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 304,
+ 459,
+ 546,
+ 662
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 304,
+ 459,
+ 546,
+ 662
+ ],
+ "type": "text",
+ "content": "tion characters. Preservation of motion semantics can enhance the efficiency of motion retargeting by reducing the need for time-consuming manual adjustments and refinements. However, previous methods [2, 15, 22] are mainly based on retargeting of joint positions and make less use of the extraction of semantic information. They focus on trajectory-level motion retargeting with few attention to motion semantics. Consequently, this leads to a significant loss of motion semantics and necessitates the labor-intensive intervention of animation artists for manual trajectory adjustments. Recent advancements have introduced self-contacts [23] and joint distance matrices [25] as the representation of motion semantics. Nevertheless, self-contacts are not applicable to non-contact semantics and require intricate vertex correspondence. The human-designed joint distance matrices primarily focus on joint relative relationships and still lack consideration of high-level semantic information."
+ }
+ ]
+ }
+ ],
+ "index": 12
+ },
+ {
+ "bbox": [
+ 305,
+ 665,
+ 547,
+ 715
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 305,
+ 665,
+ 547,
+ 715
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 305,
+ 665,
+ 547,
+ 715
+ ],
+ "type": "text",
+ "content": "To address the intricate task of capturing and preserving motion semantics, we introduce a new perspective: the most general and comprehensive form of motion semantics is human-level natural language, reflecting the user's intu"
+ }
+ ]
+ }
+ ],
+ "index": 13
+ }
+ ],
+ "discarded_blocks": [
+ {
+ "bbox": [
+ 65,
+ 2,
+ 111,
+ 34
+ ],
+ "type": "header",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 65,
+ 2,
+ 111,
+ 34
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 65,
+ 2,
+ 111,
+ 34
+ ],
+ "type": "text",
+ "content": "CVF"
+ }
+ ]
+ }
+ ],
+ "index": 0
+ },
+ {
+ "bbox": [
+ 145,
+ 0,
+ 496,
+ 37
+ ],
+ "type": "header",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 145,
+ 0,
+ 496,
+ 37
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 145,
+ 0,
+ 496,
+ 37
+ ],
+ "type": "text",
+ "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore."
+ }
+ ]
+ }
+ ],
+ "index": 1
+ },
+ {
+ "bbox": [
+ 58,
+ 693,
+ 212,
+ 703
+ ],
+ "type": "page_footnote",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 58,
+ 693,
+ 212,
+ 703
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 58,
+ 693,
+ 212,
+ 703
+ ],
+ "type": "text",
+ "content": "*These authors contributed equally to this work"
+ }
+ ]
+ }
+ ],
+ "index": 14
+ },
+ {
+ "bbox": [
+ 59,
+ 703,
+ 200,
+ 712
+ ],
+ "type": "page_footnote",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 59,
+ 703,
+ 200,
+ 712
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 59,
+ 703,
+ 200,
+ 712
+ ],
+ "type": "text",
+ "content": "† Corresponding author: rxiong@zju.edu.cn"
+ }
+ ]
+ }
+ ],
+ "index": 15
+ },
+ {
+ "bbox": [
+ 295,
+ 748,
+ 315,
+ 757
+ ],
+ "type": "page_number",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 295,
+ 748,
+ 315,
+ 757
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 295,
+ 748,
+ 315,
+ 757
+ ],
+ "type": "text",
+ "content": "2155"
+ }
+ ]
+ }
+ ],
+ "index": 17
+ }
+ ],
+ "page_size": [
+ 612,
+ 792
+ ],
+ "page_idx": 0
+ },
+ {
+ "para_blocks": [
+ {
+ "bbox": [
+ 46,
+ 72,
+ 287,
+ 120
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 46,
+ 72,
+ 287,
+ 120
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 46,
+ 72,
+ 287,
+ 120
+ ],
+ "type": "text",
+ "content": "itive understanding of motion. However, the main challenge of human-level motion semantics representation lies in the scarcity of labelled data. It is difficult and expensive to label sufficient semantic textual descriptions for motion data."
+ }
+ ]
+ }
+ ],
+ "index": 0
+ },
+ {
+ "bbox": [
+ 46,
+ 121,
+ 288,
+ 312
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 46,
+ 121,
+ 288,
+ 312
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 46,
+ 121,
+ 288,
+ 312
+ ],
+ "type": "text",
+ "content": "In this paper, we introduce the incorporation of robust, state-of-the-art vision-language models to provide semantic guidance to the motion retargeting network. In the absence of labelled semantic data, we leverage the capabilities of a vision-language model to serve as a semantic supervisor in an unsupervised manner, which can extract motion semantics in a more intuitive way, as illustrated in Fig. 1. This approach offers a solution to the challenge of the limited availability of labelled semantic datasets for motion retargeting. To establish a connection between the vision-language model and motion semantics extraction, we employ the differentiable skinning and rendering modules to translate 3D motions into image sequences. Subsequently, we adopt visual question answering with guiding questions to inquire about the most relevant motion semantics from the vision-language model."
+ }
+ ]
+ }
+ ],
+ "index": 1
+ },
+ {
+ "bbox": [
+ 46,
+ 314,
+ 288,
+ 517
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 46,
+ 314,
+ 288,
+ 517
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 46,
+ 314,
+ 288,
+ 517
+ ],
+ "type": "text",
+ "content": "To guarantee the preservation of motion semantics during motion retargeting, we introduce a semantics consistency loss that enforces the semantic embeddings of the targeted motion to closely align with those of the source motion. For dense semantic supervision and computational efficiency, we utilize latent features extracted by the vision-language model as the semantic embeddings instead of textual descriptions. To alleviate the non-linearity of the semantics consistency loss, we introduce a two-stage training approach. We categorize motion information into two distinct levels: the skeletal level and the semantic level. Our approach involves pre-training the motion retargeting network at the skeletal level, which is then further refined and fine-tuned at the semantic level with the power of vision-language models. To the best of our knowledge, we are the first to leverage the extensive capability of vision-language models for the task of semantics-aware motion retargeting."
+ }
+ ]
+ }
+ ],
+ "index": 2
+ },
+ {
+ "bbox": [
+ 59,
+ 518,
+ 275,
+ 529
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 59,
+ 518,
+ 275,
+ 529
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 59,
+ 518,
+ 275,
+ 529
+ ],
+ "type": "text",
+ "content": "To summarize, the contributions of our work include:"
+ }
+ ]
+ }
+ ],
+ "index": 3
+ },
+ {
+ "bbox": [
+ 47,
+ 534,
+ 287,
+ 713
+ ],
+ "type": "list",
+ "angle": 0,
+ "index": 8,
+ "blocks": [
+ {
+ "bbox": [
+ 47,
+ 534,
+ 287,
+ 582
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 47,
+ 534,
+ 287,
+ 582
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 47,
+ 534,
+ 287,
+ 582
+ ],
+ "type": "text",
+ "content": "- We introduce an innovative framework that leverages the expertise of vision-language models as a semantic supervisor to tackle the challenge of limited labelled semantic data for the task of motion retargeting."
+ }
+ ]
+ }
+ ],
+ "index": 4
+ },
+ {
+ "bbox": [
+ 47,
+ 582,
+ 287,
+ 629
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 47,
+ 582,
+ 287,
+ 629
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 47,
+ 582,
+ 287,
+ 629
+ ],
+ "type": "text",
+ "content": "- We propose to use differentiable skinning and rendering to translate from the motion domain to the image domain and perform guiding visual question answering to obtain human-level semantic representation."
+ }
+ ]
+ }
+ ],
+ "index": 5
+ },
+ {
+ "bbox": [
+ 47,
+ 630,
+ 287,
+ 677
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 47,
+ 630,
+ 287,
+ 677
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 47,
+ 630,
+ 287,
+ 677
+ ],
+ "type": "text",
+ "content": "- We design a semantics consistency loss to maintain motion semantics and introduce an effective two-stage training pipeline consisting of pre-training at the skeletal level and fine-tuning at the semantic level."
+ }
+ ]
+ }
+ ],
+ "index": 6
+ },
+ {
+ "bbox": [
+ 47,
+ 678,
+ 287,
+ 713
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 47,
+ 678,
+ 287,
+ 713
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 47,
+ 678,
+ 287,
+ 713
+ ],
+ "type": "text",
+ "content": "- Our model achieves state-of-the-art performance in the challenging task of semantics-aware motion retargeting, delivering exceptional performance marked by high"
+ }
+ ]
+ }
+ ],
+ "index": 7
+ }
+ ],
+ "sub_type": "text"
+ },
+ {
+ "bbox": [
+ 314,
+ 72,
+ 520,
+ 84
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 314,
+ 72,
+ 520,
+ 84
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 314,
+ 72,
+ 520,
+ 84
+ ],
+ "type": "text",
+ "content": "quality motion and superior semantics consistency."
+ }
+ ]
+ }
+ ],
+ "index": 9
+ },
+ {
+ "bbox": [
+ 306,
+ 95,
+ 397,
+ 107
+ ],
+ "type": "title",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 306,
+ 95,
+ 397,
+ 107
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 306,
+ 95,
+ 397,
+ 107
+ ],
+ "type": "text",
+ "content": "2. Related Works"
+ }
+ ]
+ }
+ ],
+ "index": 10
+ },
+ {
+ "bbox": [
+ 304,
+ 114,
+ 545,
+ 306
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 304,
+ 114,
+ 545,
+ 306
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 304,
+ 114,
+ 545,
+ 306
+ ],
+ "type": "text",
+ "content": "Optimization-based Motion Retargeting. Motion retargeting is a technique to adapt existing motion data from a source character to a target character with different bone proportions, mesh skins, and skeletal structures. Early works formulate motion retargeting as a constrained optimization problem [4, 6, 11, 18]. Gleicher et al. [6] introduced a motion retargeting method, which identifies motion features as constraints and computes an adapted motion using a space-time constraint solver to preserve the desirable qualities. Lee et al. [11] proposed a method to adapt existing motion of a human-like character to have the desired features with specified constraints and combined a hierarchical curve fitting technique with inverse kinematics. Nonetheless, these methods necessitate the tedious and time-consuming process of formulating human-designed constraints for specific motion sequences."
+ }
+ ]
+ }
+ ],
+ "index": 11
+ },
+ {
+ "bbox": [
+ 304,
+ 307,
+ 545,
+ 557
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 304,
+ 307,
+ 545,
+ 557
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 304,
+ 307,
+ 545,
+ 557
+ ],
+ "type": "text",
+ "content": "Learning-based Motion Retargeting. With the rise of deep learning, researchers have been developing learning-based motion retargeting methods in recent years [2, 9, 15, 22, 23, 25]. Villegas et al. [22] presented a recurrent neural network architecture, which incorporates a forward kinematics layer and cycle consistency loss for unsupervised motion retargetting. Aberman et al. [2] designed a skeleton-aware network with differentiable convolution, pooling, and unpooling operators to transform various homeomorphic skeletons into a primary skeleton for cross-structural motion retargeting. However, these methods tend to concentrate on trajectory-level motion retargeting with limited consideration for motion semantics, which often results in a notable loss of motion semantics and increase the heavy burden of manual adjustments to the trajectories. To address these problems, Zhang et al. [25] presented a residual retargeting network that uses a skeleton-aware module to preserve motion semantics and a shape-aware module to reduce interpenetration and contact missing. While this method successfully preserves joint relative relationships, it still falls short in addressing high-level motion semantics."
+ }
+ ]
+ }
+ ],
+ "index": 12
+ },
+ {
+ "bbox": [
+ 304,
+ 558,
+ 545,
+ 713
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 304,
+ 558,
+ 545,
+ 713
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 304,
+ 558,
+ 545,
+ 713
+ ],
+ "type": "text",
+ "content": "Vision-Language Models. Vision-language models have empowered various vision-language tasks, including visual question answering and image captioning. Tevet et al. [20] introduced a human motion generation model that aligns the latent space with that of the Contrastive Language-Image Pre-training (CLIP) model. Li et al. [13] proposed a pretraining strategy from off-the-shelf frozen pre-trained image encoders and frozen large language models for vision-to-language generative learning. Zhu et al. [27] presented a vision-language model, which uses one projection layer to align a frozen visual encoder with a frozen advanced large language models (LLM). However, these efforts primarily concentrate on vision-language tasks, leaving the question"
+ }
+ ]
+ }
+ ],
+ "index": 13
+ }
+ ],
+ "discarded_blocks": [
+ {
+ "bbox": [
+ 295,
+ 748,
+ 315,
+ 757
+ ],
+ "type": "page_number",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 295,
+ 748,
+ 315,
+ 757
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 295,
+ 748,
+ 315,
+ 757
+ ],
+ "type": "text",
+ "content": "2156"
+ }
+ ]
+ }
+ ],
+ "index": 14
+ }
+ ],
+ "page_size": [
+ 612,
+ 792
+ ],
+ "page_idx": 1
+ },
+ {
+ "para_blocks": [
+ {
+ "type": "image",
+ "bbox": [
+ 62,
+ 71,
+ 532,
+ 213
+ ],
+ "blocks": [
+ {
+ "bbox": [
+ 62,
+ 71,
+ 532,
+ 213
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 62,
+ 71,
+ 532,
+ 213
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 62,
+ 71,
+ 532,
+ 213
+ ],
+ "type": "image",
+ "image_path": "c14d3f4806670441419f84b705ce55d23b7c4d74c2f021432b6dd6694aa6c582.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 0,
+ "angle": 0,
+ "type": "image_body"
+ },
+ {
+ "bbox": [
+ 61,
+ 215,
+ 174,
+ 224
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 61,
+ 215,
+ 174,
+ 224
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 61,
+ 215,
+ 174,
+ 224
+ ],
+ "type": "text",
+ "content": "Stage I: Skeleton-aware Pre-training"
+ }
+ ]
+ }
+ ],
+ "index": 1,
+ "angle": 0,
+ "type": "image_caption"
+ }
+ ],
+ "index": 0
+ },
+ {
+ "type": "image",
+ "bbox": [
+ 61,
+ 225,
+ 212,
+ 292
+ ],
+ "blocks": [
+ {
+ "bbox": [
+ 61,
+ 225,
+ 212,
+ 292
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 61,
+ 225,
+ 212,
+ 292
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 61,
+ 225,
+ 212,
+ 292
+ ],
+ "type": "image",
+ "image_path": "69ec7727da1edbc17c8a750293f6acfb0d272504d6e5f0d1f8352703faec249d.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 3,
+ "angle": 0,
+ "type": "image_body"
+ },
+ {
+ "bbox": [
+ 46,
+ 303,
+ 545,
+ 360
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 46,
+ 303,
+ 545,
+ 360
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 46,
+ 303,
+ 545,
+ 360
+ ],
+ "type": "text",
+ "content": "Figure 2. Model Architecture. Our semantics-aware motion retargeting framework employs a two-stage pipeline. Initially, the retargeting network consisting of multiple spatial-temporal graph convolution layers is trained at the skeletal level to establish a base model. Subsequently, this model undergoes further refinement and fine-tuning at the semantic level by the alignment of latent semantic embeddings of the source and target, leveraging the extensive knowledge of vision-language models. The latent semantic embedding is extracted by guiding visual question answering. Additionally, the geometry constraints are also enforced during fine-tuning to avoid interpenetration."
+ }
+ ]
+ }
+ ],
+ "index": 5,
+ "angle": 0,
+ "type": "image_caption"
+ }
+ ],
+ "index": 3
+ },
+ {
+ "type": "image",
+ "bbox": [
+ 217,
+ 225,
+ 533,
+ 293
+ ],
+ "blocks": [
+ {
+ "bbox": [
+ 277,
+ 216,
+ 414,
+ 225
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 277,
+ 216,
+ 414,
+ 225
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 277,
+ 216,
+ 414,
+ 225
+ ],
+ "type": "text",
+ "content": "Stage II: Semantics & Geometry Fine-tuning"
+ }
+ ]
+ }
+ ],
+ "index": 2,
+ "angle": 0,
+ "type": "image_caption"
+ },
+ {
+ "bbox": [
+ 217,
+ 225,
+ 533,
+ 293
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 217,
+ 225,
+ 533,
+ 293
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 217,
+ 225,
+ 533,
+ 293
+ ],
+ "type": "image",
+ "image_path": "6bf8c1ef8f6296d6854894bb056aa820762d9293e2c39fd0d75a3383c649cc35.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 4,
+ "angle": 0,
+ "type": "image_body"
+ }
+ ],
+ "index": 4
+ },
+ {
+ "bbox": [
+ 46,
+ 371,
+ 287,
+ 395
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 46,
+ 371,
+ 287,
+ 395
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 46,
+ 371,
+ 287,
+ 395
+ ],
+ "type": "text",
+ "content": "of how to effectively employ vision-language models to guide motion retargeting as an open and unexplored area."
+ }
+ ]
+ }
+ ],
+ "index": 6
+ },
+ {
+ "bbox": [
+ 46,
+ 396,
+ 287,
+ 529
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 46,
+ 396,
+ 287,
+ 529
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 46,
+ 396,
+ 287,
+ 529
+ ],
+ "type": "text",
+ "content": "Human motion synthesis. Human motion synthesis is a domain related to motion retargeting, which aims to synthesize realistic and lifelike human motions from random noise or other inputs with generative networks. Guo et al. [7] proposed to generate human motion sequences based on action type. Guo et al. [8] presented a temporal variational autoencoder to synthesize human motions from text input. Tevet et al. [21] introduced a diffusion-based generative model for human motion generation. As comparison, we focus on the task of motion retargeting, where existing motion data is transferred from a source character to a target character."
+ }
+ ]
+ }
+ ],
+ "index": 7
+ },
+ {
+ "bbox": [
+ 47,
+ 541,
+ 102,
+ 553
+ ],
+ "type": "title",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 47,
+ 541,
+ 102,
+ 553
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 47,
+ 541,
+ 102,
+ 553
+ ],
+ "type": "text",
+ "content": "3. Method"
+ }
+ ]
+ }
+ ],
+ "index": 8
+ },
+ {
+ "bbox": [
+ 47,
+ 561,
+ 115,
+ 573
+ ],
+ "type": "title",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 47,
+ 561,
+ 115,
+ 573
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 47,
+ 561,
+ 115,
+ 573
+ ],
+ "type": "text",
+ "content": "3.1. Overview"
+ }
+ ]
+ }
+ ],
+ "index": 9
+ },
+ {
+ "bbox": [
+ 46,
+ 581,
+ 287,
+ 676
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 46,
+ 581,
+ 287,
+ 676
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 46,
+ 581,
+ 287,
+ 676
+ ],
+ "type": "text",
+ "content": "We present a novel semantic-aware motion retargeting method, as illustrated in Fig 2. In contrast to previous methods that neglect motion semantics [2, 15, 22] or rely on human-designed joint-level representations [25], our approach integrates natural language descriptions from vision-language models to offer an explicit and comprehensive semantic representation of character motions, thereby maintaining the preservation of semantic consistency."
+ }
+ ]
+ }
+ ],
+ "index": 10
+ },
+ {
+ "bbox": [
+ 46,
+ 677,
+ 287,
+ 713
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 46,
+ 677,
+ 287,
+ 713
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 46,
+ 677,
+ 287,
+ 713
+ ],
+ "type": "text",
+ "content": "Task definition. Given a source motion sequence, consisting of the skeleton motion and its associated skinning geometry, as well as a target character in the reference pose (e.g.,"
+ }
+ ]
+ }
+ ],
+ "index": 11
+ },
+ {
+ "bbox": [
+ 304,
+ 371,
+ 545,
+ 419
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 304,
+ 371,
+ 545,
+ 419
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 304,
+ 371,
+ 545,
+ 419
+ ],
+ "type": "text",
+ "content": "T-posed), the objective of motion retargeting is to generate the target motion while preserving crucial motion characteristics, such as joint trajectory similarity and motion semantics, and satisfying geometry constraints."
+ }
+ ]
+ }
+ ],
+ "index": 12
+ },
+ {
+ "bbox": [
+ 304,
+ 422,
+ 546,
+ 567
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 304,
+ 422,
+ 546,
+ 567
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 304,
+ 422,
+ 546,
+ 567
+ ],
+ "type": "text",
+ "content": "Graph representation. The skeleton motion sequence can be modelled as a sequence of graphs according to the skeleton hierarchy where each node corresponds to a joint and each edge represents a directed connection between joints. Assume that the motion sequence has "
+ },
+ {
+ "bbox": [
+ 304,
+ 422,
+ 546,
+ 567
+ ],
+ "type": "inline_equation",
+ "content": "T"
+ },
+ {
+ "bbox": [
+ 304,
+ 422,
+ 546,
+ 567
+ ],
+ "type": "text",
+ "content": " frames in total and the animation characters have "
+ },
+ {
+ "bbox": [
+ 304,
+ 422,
+ 546,
+ 567
+ ],
+ "type": "inline_equation",
+ "content": "N"
+ },
+ {
+ "bbox": [
+ 304,
+ 422,
+ 546,
+ 567
+ ],
+ "type": "text",
+ "content": " nodes and "
+ },
+ {
+ "bbox": [
+ 304,
+ 422,
+ 546,
+ 567
+ ],
+ "type": "inline_equation",
+ "content": "M"
+ },
+ {
+ "bbox": [
+ 304,
+ 422,
+ 546,
+ 567
+ ],
+ "type": "text",
+ "content": " edges. In our approach, we consider motion data as node features "
+ },
+ {
+ "bbox": [
+ 304,
+ 422,
+ 546,
+ 567
+ ],
+ "type": "inline_equation",
+ "content": "\\mathbf{Q} \\in \\mathbb{R}^{T \\times N \\times 9}"
+ },
+ {
+ "bbox": [
+ 304,
+ 422,
+ 546,
+ 567
+ ],
+ "type": "text",
+ "content": ", which encompasses the 6D joint rotation representation [26] and 3D joint positions. Additionally, we utilize skeleton hierarchy information as edge features "
+ },
+ {
+ "bbox": [
+ 304,
+ 422,
+ 546,
+ 567
+ ],
+ "type": "inline_equation",
+ "content": "\\mathbf{E} \\in \\mathbb{R}^{M \\times 3}"
+ },
+ {
+ "bbox": [
+ 304,
+ 422,
+ 546,
+ 567
+ ],
+ "type": "text",
+ "content": ", which consists of the 3D position offset between each joint and its parent joint."
+ }
+ ]
+ }
+ ],
+ "index": 13
+ },
+ {
+ "bbox": [
+ 304,
+ 570,
+ 546,
+ 713
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 304,
+ 570,
+ 546,
+ 713
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 304,
+ 570,
+ 546,
+ 713
+ ],
+ "type": "text",
+ "content": "Two-stage training. The motion of animation characters can be divided into skeletal movements and skinned movements, represented by skeletal joints and skinned vertices respectively. The skinned movements can be derived from the skeletal movements through the linear blend skinning algorithm [12]. Therefore, motion retargeting at the skeletal level can effectively downscale the data and reduce the complexity of the problem. However, this simplification process can lead to the loss of motion semantics and violations of geometry constraints. To address these issues, we employ a two-stage pipeline. Initially, we pre-train a skeleton-aware network to ensure a general initialization for motion retard"
+ }
+ ]
+ }
+ ],
+ "index": 14
+ }
+ ],
+ "discarded_blocks": [
+ {
+ "bbox": [
+ 295,
+ 748,
+ 315,
+ 757
+ ],
+ "type": "page_number",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 295,
+ 748,
+ 315,
+ 757
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 295,
+ 748,
+ 315,
+ 757
+ ],
+ "type": "text",
+ "content": "2157"
+ }
+ ]
+ }
+ ],
+ "index": 15
+ }
+ ],
+ "page_size": [
+ 612,
+ 792
+ ],
+ "page_idx": 2
+ },
+ {
+ "para_blocks": [
+ {
+ "bbox": [
+ 46,
+ 72,
+ 287,
+ 133
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 46,
+ 72,
+ 287,
+ 133
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 46,
+ 72,
+ 287,
+ 133
+ ],
+ "type": "text",
+ "content": "getting without considering motion semantics and geometry constraints. Subsequently, we fine-tune the pre-trained network for each source-target character pair with the vision-language model to maintain semantic consistency and enforce geometry constraints to prevent interpenetrations."
+ }
+ ]
+ }
+ ],
+ "index": 0
+ },
+ {
+ "bbox": [
+ 47,
+ 138,
+ 203,
+ 152
+ ],
+ "type": "title",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 47,
+ 138,
+ 203,
+ 152
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 47,
+ 138,
+ 203,
+ 152
+ ],
+ "type": "text",
+ "content": "3.2. Skeleton-aware Pre-training"
+ }
+ ]
+ }
+ ],
+ "index": 1
+ },
+ {
+ "bbox": [
+ 46,
+ 157,
+ 287,
+ 264
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 46,
+ 157,
+ 287,
+ 264
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 46,
+ 157,
+ 287,
+ 264
+ ],
+ "type": "text",
+ "content": "Retargeting network. We propose a retargeting network consisting of a graph motion encoder and a graph motion decoder for motion retargeting. The motion encoder "
+ },
+ {
+ "bbox": [
+ 46,
+ 157,
+ 287,
+ 264
+ ],
+ "type": "inline_equation",
+ "content": "\\mathcal{F}_{\\theta}"
+ },
+ {
+ "bbox": [
+ 46,
+ 157,
+ 287,
+ 264
+ ],
+ "type": "text",
+ "content": " encodes the motion data "
+ },
+ {
+ "bbox": [
+ 46,
+ 157,
+ 287,
+ 264
+ ],
+ "type": "inline_equation",
+ "content": "\\mathbf{Q}_A"
+ },
+ {
+ "bbox": [
+ 46,
+ 157,
+ 287,
+ 264
+ ],
+ "type": "text",
+ "content": " of the source character A into the latent motion embedding "
+ },
+ {
+ "bbox": [
+ 46,
+ 157,
+ 287,
+ 264
+ ],
+ "type": "inline_equation",
+ "content": "\\mathbf{Z}_A"
+ },
+ {
+ "bbox": [
+ 46,
+ 157,
+ 287,
+ 264
+ ],
+ "type": "text",
+ "content": ". Then, the motion decoder "
+ },
+ {
+ "bbox": [
+ 46,
+ 157,
+ 287,
+ 264
+ ],
+ "type": "inline_equation",
+ "content": "\\mathcal{F}_{\\phi}"
+ },
+ {
+ "bbox": [
+ 46,
+ 157,
+ 287,
+ 264
+ ],
+ "type": "text",
+ "content": " generates the joint angles "
+ },
+ {
+ "bbox": [
+ 46,
+ 157,
+ 287,
+ 264
+ ],
+ "type": "inline_equation",
+ "content": "\\mathbf{Q}_B"
+ },
+ {
+ "bbox": [
+ 46,
+ 157,
+ 287,
+ 264
+ ],
+ "type": "text",
+ "content": " of the target character B based on the latent features. Both the motion encoder and decoder are composed of multiple graph convolutions. More details are available in the supplementary materials."
+ }
+ ]
+ }
+ ],
+ "index": 2
+ },
+ {
+ "bbox": [
+ 127,
+ 274,
+ 208,
+ 287
+ ],
+ "type": "interline_equation",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 127,
+ 274,
+ 208,
+ 287
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 127,
+ 274,
+ 208,
+ 287
+ ],
+ "type": "interline_equation",
+ "content": "\\mathbf {Z} _ {A} = \\mathcal {F} _ {\\theta} (\\mathbf {Q} _ {A}, \\mathbf {E} _ {A})",
+ "image_path": "1919fe3d9ed0bd29a042719cc67e1911ba82636f63227b008c9bad9bcbdf3370.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 3
+ },
+ {
+ "bbox": [
+ 127,
+ 287,
+ 287,
+ 301
+ ],
+ "type": "interline_equation",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 127,
+ 287,
+ 287,
+ 301
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 127,
+ 287,
+ 287,
+ 301
+ ],
+ "type": "interline_equation",
+ "content": "\\mathbf {Q} _ {B} = \\mathcal {F} _ {\\phi} (\\mathbf {Z} _ {A}, \\mathbf {E} _ {B}) \\tag {1}",
+ "image_path": "0e577cbea2ea7811c96f070e8016d8a17249289ca724884fa5dc1d85e059cacd.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 4
+ },
+ {
+ "bbox": [
+ 46,
+ 304,
+ 287,
+ 388
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 46,
+ 304,
+ 287,
+ 388
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 46,
+ 304,
+ 287,
+ 388
+ ],
+ "type": "text",
+ "content": "In the first phase, we train the motion encoder and decoder at the skeletal level to establish a robust initialization for motion retargeting. Following the unsupervised learning setting in [22], we train the network with the reconstruction loss, cycle consistency loss, adversarial loss, and joint relationship loss. The overall objective function for skeleton-aware pre-training is defined as follows:"
+ }
+ ]
+ }
+ ],
+ "index": 5
+ },
+ {
+ "bbox": [
+ 63,
+ 407,
+ 287,
+ 420
+ ],
+ "type": "interline_equation",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 63,
+ 407,
+ 287,
+ 420
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 63,
+ 407,
+ 287,
+ 420
+ ],
+ "type": "interline_equation",
+ "content": "\\mathcal {L} _ {s k e l} = \\lambda_ {r} \\mathcal {L} _ {r e c} + \\lambda_ {c} \\mathcal {L} _ {c y c} + \\lambda_ {a} \\mathcal {L} _ {a d v} + \\lambda_ {j} \\mathcal {L} _ {j d m} \\tag {2}",
+ "image_path": "598dbc9915a094c5ddaf3281499c7d06215fb76ee0d4bb600f32b4e131f909be.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 6
+ },
+ {
+ "bbox": [
+ 46,
+ 426,
+ 287,
+ 487
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 46,
+ 426,
+ 287,
+ 487
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 46,
+ 426,
+ 287,
+ 487
+ ],
+ "type": "text",
+ "content": "The reconstruction loss "
+ },
+ {
+ "bbox": [
+ 46,
+ 426,
+ 287,
+ 487
+ ],
+ "type": "inline_equation",
+ "content": "\\mathcal{L}_{rec}"
+ },
+ {
+ "bbox": [
+ 46,
+ 426,
+ 287,
+ 487
+ ],
+ "type": "text",
+ "content": " encourages the retargeted motion to match the source motion when the target character is the same as the source character. Let "
+ },
+ {
+ "bbox": [
+ 46,
+ 426,
+ 287,
+ 487
+ ],
+ "type": "inline_equation",
+ "content": "\\mathbf{Q}_{A,t}"
+ },
+ {
+ "bbox": [
+ 46,
+ 426,
+ 287,
+ 487
+ ],
+ "type": "text",
+ "content": " be the motion data of source character A at frame "
+ },
+ {
+ "bbox": [
+ 46,
+ 426,
+ 287,
+ 487
+ ],
+ "type": "inline_equation",
+ "content": "t"
+ },
+ {
+ "bbox": [
+ 46,
+ 426,
+ 287,
+ 487
+ ],
+ "type": "text",
+ "content": ", and "
+ },
+ {
+ "bbox": [
+ 46,
+ 426,
+ 287,
+ 487
+ ],
+ "type": "inline_equation",
+ "content": "\\hat{\\mathbf{Q}}_{A,t}^{rec}"
+ },
+ {
+ "bbox": [
+ 46,
+ 426,
+ 287,
+ 487
+ ],
+ "type": "text",
+ "content": " be the reconstructed motion. Then "
+ },
+ {
+ "bbox": [
+ 46,
+ 426,
+ 287,
+ 487
+ ],
+ "type": "inline_equation",
+ "content": "\\mathcal{L}_{rec}"
+ },
+ {
+ "bbox": [
+ 46,
+ 426,
+ 287,
+ 487
+ ],
+ "type": "text",
+ "content": " is defined as:"
+ }
+ ]
+ }
+ ],
+ "index": 7
+ },
+ {
+ "bbox": [
+ 105,
+ 492,
+ 287,
+ 520
+ ],
+ "type": "interline_equation",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 105,
+ 492,
+ 287,
+ 520
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 105,
+ 492,
+ 287,
+ 520
+ ],
+ "type": "interline_equation",
+ "content": "\\mathcal {L} _ {r e c} = \\sum_ {t} \\left| \\left| \\hat {\\mathbf {Q}} _ {A, t} ^ {r e c} - \\mathbf {Q} _ {A, t} \\right| \\right| _ {2} ^ {2} \\tag {3}",
+ "image_path": "037cd8b61920e39bf5722851b113af5bde217d4ef145af8f8129bd27cfab1b30.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 8
+ },
+ {
+ "bbox": [
+ 46,
+ 526,
+ 287,
+ 588
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 46,
+ 526,
+ 287,
+ 588
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 46,
+ 526,
+ 287,
+ 588
+ ],
+ "type": "text",
+ "content": "The cycle consistency loss "
+ },
+ {
+ "bbox": [
+ 46,
+ 526,
+ 287,
+ 588
+ ],
+ "type": "inline_equation",
+ "content": "\\mathcal{L}_{cyc}"
+ },
+ {
+ "bbox": [
+ 46,
+ 526,
+ 287,
+ 588
+ ],
+ "type": "text",
+ "content": " promotes the consistency of retargeted motion from the source character A to the target character B and then back to the source character A, ensuring it remains in line with the original motion. Let "
+ },
+ {
+ "bbox": [
+ 46,
+ 526,
+ 287,
+ 588
+ ],
+ "type": "inline_equation",
+ "content": "\\hat{\\mathbf{Q}}_{A,t}^{cyc}"
+ },
+ {
+ "bbox": [
+ 46,
+ 526,
+ 287,
+ 588
+ ],
+ "type": "text",
+ "content": " represent the retargeted motion. Then "
+ },
+ {
+ "bbox": [
+ 46,
+ 526,
+ 287,
+ 588
+ ],
+ "type": "inline_equation",
+ "content": "\\mathcal{L}_{cyc}"
+ },
+ {
+ "bbox": [
+ 46,
+ 526,
+ 287,
+ 588
+ ],
+ "type": "text",
+ "content": " is defined as:"
+ }
+ ]
+ }
+ ],
+ "index": 9
+ },
+ {
+ "bbox": [
+ 105,
+ 594,
+ 287,
+ 622
+ ],
+ "type": "interline_equation",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 105,
+ 594,
+ 287,
+ 622
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 105,
+ 594,
+ 287,
+ 622
+ ],
+ "type": "interline_equation",
+ "content": "\\mathcal {L} _ {c y c} = \\sum_ {t} \\left| \\left| \\hat {\\mathbf {Q}} _ {A, t} ^ {c y c} - \\mathbf {Q} _ {A, t} \\right| \\right| _ {2} ^ {2} \\tag {4}",
+ "image_path": "49692772fed633d343b051ab83a79a7f8a1cc25c479bf086d8ec27360a602885.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 10
+ },
+ {
+ "bbox": [
+ 46,
+ 625,
+ 287,
+ 685
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 46,
+ 625,
+ 287,
+ 685
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 46,
+ 625,
+ 287,
+ 685
+ ],
+ "type": "text",
+ "content": "The adversarial loss "
+ },
+ {
+ "bbox": [
+ 46,
+ 625,
+ 287,
+ 685
+ ],
+ "type": "inline_equation",
+ "content": "\\mathcal{L}_{adv}"
+ },
+ {
+ "bbox": [
+ 46,
+ 625,
+ 287,
+ 685
+ ],
+ "type": "text",
+ "content": " is calculated by a discriminator network, which utilizes the unpaired data of the target character to learn how to distinguish whether the motions are real or fake. Let "
+ },
+ {
+ "bbox": [
+ 46,
+ 625,
+ 287,
+ 685
+ ],
+ "type": "inline_equation",
+ "content": "\\mathcal{F}_{\\gamma}"
+ },
+ {
+ "bbox": [
+ 46,
+ 625,
+ 287,
+ 685
+ ],
+ "type": "text",
+ "content": " be the discriminator network, and "
+ },
+ {
+ "bbox": [
+ 46,
+ 625,
+ 287,
+ 685
+ ],
+ "type": "inline_equation",
+ "content": "\\mathbf{Q}_{B,t}"
+ },
+ {
+ "bbox": [
+ 46,
+ 625,
+ 287,
+ 685
+ ],
+ "type": "text",
+ "content": " be the retargeted motion at frame "
+ },
+ {
+ "bbox": [
+ 46,
+ 625,
+ 287,
+ 685
+ ],
+ "type": "inline_equation",
+ "content": "t"
+ },
+ {
+ "bbox": [
+ 46,
+ 625,
+ 287,
+ 685
+ ],
+ "type": "text",
+ "content": ". Then it is defined as:"
+ }
+ ]
+ }
+ ],
+ "index": 11
+ },
+ {
+ "bbox": [
+ 99,
+ 691,
+ 287,
+ 715
+ ],
+ "type": "interline_equation",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 99,
+ 691,
+ 287,
+ 715
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 99,
+ 691,
+ 287,
+ 715
+ ],
+ "type": "interline_equation",
+ "content": "\\mathcal {L} _ {a d v} = \\sum_ {t} \\log \\left(1 - \\mathcal {F} _ {\\gamma} \\left(\\mathbf {Q} _ {B, t}\\right)\\right) \\tag {5}",
+ "image_path": "3b79a194dacaf25e05b88c7840b8ec52ea33090ff1427848e48944a8dc33d51e.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 12
+ },
+ {
+ "bbox": [
+ 305,
+ 72,
+ 545,
+ 156
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 305,
+ 72,
+ 545,
+ 156
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 305,
+ 72,
+ 545,
+ 156
+ ],
+ "type": "text",
+ "content": "The joint relationship loss "
+ },
+ {
+ "bbox": [
+ 305,
+ 72,
+ 545,
+ 156
+ ],
+ "type": "inline_equation",
+ "content": "\\mathcal{L}_{jdm}"
+ },
+ {
+ "bbox": [
+ 305,
+ 72,
+ 545,
+ 156
+ ],
+ "type": "text",
+ "content": " is calculated by the joint distance matrix (JDM) "
+ },
+ {
+ "bbox": [
+ 305,
+ 72,
+ 545,
+ 156
+ ],
+ "type": "inline_equation",
+ "content": "\\mathbf{D} \\in \\mathbb{R}^{N \\times N}"
+ },
+ {
+ "bbox": [
+ 305,
+ 72,
+ 545,
+ 156
+ ],
+ "type": "text",
+ "content": ", which represents the relative positional relationships of the joints. The element "
+ },
+ {
+ "bbox": [
+ 305,
+ 72,
+ 545,
+ 156
+ ],
+ "type": "inline_equation",
+ "content": "d_{i,j}"
+ },
+ {
+ "bbox": [
+ 305,
+ 72,
+ 545,
+ 156
+ ],
+ "type": "text",
+ "content": " of "
+ },
+ {
+ "bbox": [
+ 305,
+ 72,
+ 545,
+ 156
+ ],
+ "type": "inline_equation",
+ "content": "\\mathbf{D}"
+ },
+ {
+ "bbox": [
+ 305,
+ 72,
+ 545,
+ 156
+ ],
+ "type": "text",
+ "content": " represents the Euclidean distance between joint "
+ },
+ {
+ "bbox": [
+ 305,
+ 72,
+ 545,
+ 156
+ ],
+ "type": "inline_equation",
+ "content": "i"
+ },
+ {
+ "bbox": [
+ 305,
+ 72,
+ 545,
+ 156
+ ],
+ "type": "text",
+ "content": " and joint "
+ },
+ {
+ "bbox": [
+ 305,
+ 72,
+ 545,
+ 156
+ ],
+ "type": "inline_equation",
+ "content": "j"
+ },
+ {
+ "bbox": [
+ 305,
+ 72,
+ 545,
+ 156
+ ],
+ "type": "text",
+ "content": ". We extract the joint distance matrix from the target character and compare it with the source character. Then "
+ },
+ {
+ "bbox": [
+ 305,
+ 72,
+ 545,
+ 156
+ ],
+ "type": "inline_equation",
+ "content": "\\mathcal{L}_{jdm}"
+ },
+ {
+ "bbox": [
+ 305,
+ 72,
+ 545,
+ 156
+ ],
+ "type": "text",
+ "content": " is defined as:"
+ }
+ ]
+ }
+ ],
+ "index": 13
+ },
+ {
+ "bbox": [
+ 350,
+ 165,
+ 545,
+ 189
+ ],
+ "type": "interline_equation",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 350,
+ 165,
+ 545,
+ 189
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 350,
+ 165,
+ 545,
+ 189
+ ],
+ "type": "interline_equation",
+ "content": "\\mathcal {L} _ {j d m} = \\sum_ {t} \\left| \\left| \\eta (\\mathbf {D} _ {A, t}) - \\eta (\\mathbf {D} _ {B, t}) \\right| \\right| _ {2} ^ {2} \\tag {6}",
+ "image_path": "26c89dc4bf2f31b37f96ccec8caa2eee48f3c566353e109125d1231d4b53db61.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 14
+ },
+ {
+ "bbox": [
+ 305,
+ 192,
+ 545,
+ 228
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 305,
+ 192,
+ 545,
+ 228
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 305,
+ 192,
+ 545,
+ 228
+ ],
+ "type": "text",
+ "content": "where "
+ },
+ {
+ "bbox": [
+ 305,
+ 192,
+ 545,
+ 228
+ ],
+ "type": "inline_equation",
+ "content": "\\eta(.)"
+ },
+ {
+ "bbox": [
+ 305,
+ 192,
+ 545,
+ 228
+ ],
+ "type": "text",
+ "content": " is an L1 normalization performed on each row of the distance matrix. This normalization operation eliminates the difference in bone length to some extent."
+ }
+ ]
+ }
+ ],
+ "index": 15
+ },
+ {
+ "bbox": [
+ 306,
+ 234,
+ 495,
+ 247
+ ],
+ "type": "title",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 306,
+ 234,
+ 495,
+ 247
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 306,
+ 234,
+ 495,
+ 247
+ ],
+ "type": "text",
+ "content": "3.3. Semantics & Geometry Fine-tuning"
+ }
+ ]
+ }
+ ],
+ "index": 16
+ },
+ {
+ "bbox": [
+ 305,
+ 253,
+ 545,
+ 361
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 305,
+ 253,
+ 545,
+ 361
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 305,
+ 253,
+ 545,
+ 361
+ ],
+ "type": "text",
+ "content": "In the second phase, we fine-tune the pre-trained retargeting network for each source-target character pair to preserve motion semantics and satisfy geometry constraints. The motion semantics is maintained by the semantics consistency loss, which aligns the semantic embeddings extracted from a vision-language model for both the source and target. Additionally, the geometry constraint is satisfied by minimizing the interpenetration loss. The overall objective function for fine-tuning is outlined as follows:"
+ }
+ ]
+ }
+ ],
+ "index": 17
+ },
+ {
+ "bbox": [
+ 367,
+ 372,
+ 545,
+ 385
+ ],
+ "type": "interline_equation",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 367,
+ 372,
+ 545,
+ 385
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 367,
+ 372,
+ 545,
+ 385
+ ],
+ "type": "interline_equation",
+ "content": "\\mathcal {L} _ {\\text {f i n e}} = \\lambda_ {s} \\mathcal {L} _ {\\text {s e m}} + \\lambda_ {p} \\mathcal {L} _ {\\text {p e n}} \\tag {7}",
+ "image_path": "30aed3a2cc6df73d54d3929337649b737b41743c47e1a94e44230511b77285c7.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 18
+ },
+ {
+ "bbox": [
+ 305,
+ 387,
+ 545,
+ 555
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 305,
+ 387,
+ 545,
+ 555
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 305,
+ 387,
+ 545,
+ 555
+ ],
+ "type": "text",
+ "content": "Differentiable skinning & rendering. To make the finetuning process differentiable for gradient back-propagation, we first use the differentiable linear blend skinning algorithm [12], denoted as "
+ },
+ {
+ "bbox": [
+ 305,
+ 387,
+ 545,
+ 555
+ ],
+ "type": "inline_equation",
+ "content": "\\mathcal{F}_{lbs}"
+ },
+ {
+ "bbox": [
+ 305,
+ 387,
+ 545,
+ 555
+ ],
+ "type": "text",
+ "content": ", to transform the target joint angles "
+ },
+ {
+ "bbox": [
+ 305,
+ 387,
+ 545,
+ 555
+ ],
+ "type": "inline_equation",
+ "content": "\\mathbf{Q}_B"
+ },
+ {
+ "bbox": [
+ 305,
+ 387,
+ 545,
+ 555
+ ],
+ "type": "text",
+ "content": " into skinned motions "
+ },
+ {
+ "bbox": [
+ 305,
+ 387,
+ 545,
+ 555
+ ],
+ "type": "inline_equation",
+ "content": "\\mathbf{V}_B"
+ },
+ {
+ "bbox": [
+ 305,
+ 387,
+ 545,
+ 555
+ ],
+ "type": "text",
+ "content": ", represented by 3D mesh vertices. Subsequently, we employ the differentiable projection function "
+ },
+ {
+ "bbox": [
+ 305,
+ 387,
+ 545,
+ 555
+ ],
+ "type": "inline_equation",
+ "content": "\\mathcal{F}_{proj}"
+ },
+ {
+ "bbox": [
+ 305,
+ 387,
+ 545,
+ 555
+ ],
+ "type": "text",
+ "content": " as introduced in [16] to convert the skinned motions into 2D images "
+ },
+ {
+ "bbox": [
+ 305,
+ 387,
+ 545,
+ 555
+ ],
+ "type": "inline_equation",
+ "content": "\\mathbf{I}_B"
+ },
+ {
+ "bbox": [
+ 305,
+ 387,
+ 545,
+ 555
+ ],
+ "type": "text",
+ "content": ". A limitation for the differentiable rendering process is that when projecting the 3D skinned mesh onto 2D images, the depth information is lost. To obtain a comprehensive semantic representation of the motion, we render the character from multiple perspectives and then combine the extracted features, following the Non-rigid Shape Fitting task in [16]."
+ }
+ ]
+ }
+ ],
+ "index": 19
+ },
+ {
+ "bbox": [
+ 378,
+ 564,
+ 474,
+ 577
+ ],
+ "type": "interline_equation",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 378,
+ 564,
+ 474,
+ 577
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 378,
+ 564,
+ 474,
+ 577
+ ],
+ "type": "interline_equation",
+ "content": "\\mathbf {I} _ {A} = \\mathcal {F} _ {p r o j} \\left(\\mathcal {F} _ {l b s} \\left(\\mathbf {Q} _ {A}\\right)\\right)",
+ "image_path": "6b1cbe2b61b7b0d1dba3cdff005b50016d300cdda02870573087895c8a0813d1.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 20
+ },
+ {
+ "bbox": [
+ 378,
+ 576,
+ 474,
+ 592
+ ],
+ "type": "interline_equation",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 378,
+ 576,
+ 474,
+ 592
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 378,
+ 576,
+ 474,
+ 592
+ ],
+ "type": "interline_equation",
+ "content": "\\mathbf {I} _ {B} = \\mathcal {F} _ {p r o j} \\left(\\mathcal {F} _ {l b s} (\\mathbf {Q} _ {B})\\right)",
+ "image_path": "e7325427fd44436d8d0aa4891e1a1e88b1891e0b265baf71a88d4ac023542bfb.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 21
+ },
+ {
+ "bbox": [
+ 545,
+ 573,
+ 547,
+ 582
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 545,
+ 573,
+ 547,
+ 582
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 545,
+ 573,
+ 547,
+ 582
+ ],
+ "type": "text",
+ "content": ""
+ }
+ ]
+ }
+ ],
+ "index": 22
+ },
+ {
+ "bbox": [
+ 305,
+ 594,
+ 545,
+ 714
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 305,
+ 594,
+ 545,
+ 714
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 305,
+ 594,
+ 545,
+ 714
+ ],
+ "type": "text",
+ "content": "Frozen vision-language model. To obtain an explicit and reliable semantic feature of the motion, we employ a frozen vision-language model as our semantic supervisor. Current 3D vision-language datasets [3, 28] mainly focus on the occupation or the segmentation of the object in a spatial scene like rooms, and thus the state-of-the-art 3D vision-language models [28] lack prior knowledge relevant to animation characters. In contrast, 2D vision-language models achieve better results in semantic tasks, such as image captioning, visual question answering and image-text"
+ }
+ ]
+ }
+ ],
+ "index": 23
+ }
+ ],
+ "discarded_blocks": [
+ {
+ "bbox": [
+ 294,
+ 748,
+ 315,
+ 757
+ ],
+ "type": "page_number",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 294,
+ 748,
+ 315,
+ 757
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 294,
+ 748,
+ 315,
+ 757
+ ],
+ "type": "text",
+ "content": "2158"
+ }
+ ]
+ }
+ ],
+ "index": 24
+ }
+ ],
+ "page_size": [
+ 612,
+ 792
+ ],
+ "page_idx": 3
+ },
+ {
+ "para_blocks": [
+ {
+ "type": "image",
+ "bbox": [
+ 49,
+ 71,
+ 289,
+ 199
+ ],
+ "blocks": [
+ {
+ "bbox": [
+ 49,
+ 71,
+ 289,
+ 199
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 49,
+ 71,
+ 289,
+ 199
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 49,
+ 71,
+ 289,
+ 199
+ ],
+ "type": "image",
+ "image_path": "d2c109697a3715ea97365be98876fb3fa3c49e189dc3a65adc55a802494945f6.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 0,
+ "angle": 0,
+ "type": "image_body"
+ },
+ {
+ "bbox": [
+ 57,
+ 209,
+ 277,
+ 221
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 57,
+ 209,
+ 277,
+ 221
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 57,
+ 209,
+ 277,
+ 221
+ ],
+ "type": "text",
+ "content": "Figure 3. An example of guiding visual question answering."
+ }
+ ]
+ }
+ ],
+ "index": 1,
+ "angle": 0,
+ "type": "image_caption"
+ }
+ ],
+ "index": 0
+ },
+ {
+ "bbox": [
+ 46,
+ 233,
+ 287,
+ 317
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 46,
+ 233,
+ 287,
+ 317
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 46,
+ 233,
+ 287,
+ 317
+ ],
+ "type": "text",
+ "content": "retrieval, and provides cleaner and richer semantics [24]. Therefore, we utilize a frozen 2D vision-language model to extract latent embeddings of motion semantics. The frozen 2D vision-language model employed in our work is BLIP-2 [14], which incorporates a lightweight querying transformer as a bridge between the off-the-shelf frozen pre-trained image encoder and the frozen large language model."
+ }
+ ]
+ }
+ ],
+ "index": 2
+ },
+ {
+ "bbox": [
+ 46,
+ 317,
+ 287,
+ 521
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 46,
+ 317,
+ 287,
+ 521
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 46,
+ 317,
+ 287,
+ 521
+ ],
+ "type": "text",
+ "content": "Prompt design. Since the vision-language model has the capability to extract rich information from images, it is possible that the extracted features might contain redundant details, such as the appearance of the character. To guide the vision-language model to obtain semantic embedding relevant to character motions, we adopt a guiding visual question answering approach for motion semantics extraction, as depicted in Fig. 3. We believe that there is a strong correlation between motion semantics and hand movements. To acquire a more comprehensive description of the motion, we initially provide a guiding question to BLIP-2: \"Where are the hands of the character?\" Subsequently, we introduce a new question and combine it with the first answer as the input to BLIP-2: \"[The answers to the first question generated by the vision-language model] What is the character in the image doing?\" For more details, please refer to the supplementary materials."
+ }
+ ]
+ }
+ ],
+ "index": 3
+ },
+ {
+ "bbox": [
+ 46,
+ 521,
+ 288,
+ 677
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 46,
+ 521,
+ 288,
+ 677
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 46,
+ 521,
+ 288,
+ 677
+ ],
+ "type": "text",
+ "content": "Latent semantic embedding. We opt to align the latent semantic embeddings of the source and target generated by the vision-language model rather than relying on textual descriptions, specifically leveraging the encoder output of the large language model. This approach enables us to acquire a more accurate and denser representation, while also mitigating computational costs and the non-linearity of the training objective caused by the large number of parameters of the vision-language model. Let "
+ },
+ {
+ "bbox": [
+ 46,
+ 521,
+ 288,
+ 677
+ ],
+ "type": "inline_equation",
+ "content": "\\mathbf{E}_A"
+ },
+ {
+ "bbox": [
+ 46,
+ 521,
+ 288,
+ 677
+ ],
+ "type": "text",
+ "content": " and "
+ },
+ {
+ "bbox": [
+ 46,
+ 521,
+ 288,
+ 677
+ ],
+ "type": "inline_equation",
+ "content": "\\mathbf{E}_B"
+ },
+ {
+ "bbox": [
+ 46,
+ 521,
+ 288,
+ 677
+ ],
+ "type": "text",
+ "content": " be the latent semantic embeddings of the source and target motions, "
+ },
+ {
+ "bbox": [
+ 46,
+ 521,
+ 288,
+ 677
+ ],
+ "type": "inline_equation",
+ "content": "\\mathcal{F}_{\\omega}"
+ },
+ {
+ "bbox": [
+ 46,
+ 521,
+ 288,
+ 677
+ ],
+ "type": "text",
+ "content": " be the frozen pre-trained image encoder, "
+ },
+ {
+ "bbox": [
+ 46,
+ 521,
+ 288,
+ 677
+ ],
+ "type": "inline_equation",
+ "content": "\\mathcal{F}_{\\sigma}"
+ },
+ {
+ "bbox": [
+ 46,
+ 521,
+ 288,
+ 677
+ ],
+ "type": "text",
+ "content": " be the frozen querying transformer, "
+ },
+ {
+ "bbox": [
+ 46,
+ 521,
+ 288,
+ 677
+ ],
+ "type": "inline_equation",
+ "content": "\\mathcal{F}_{\\psi}"
+ },
+ {
+ "bbox": [
+ 46,
+ 521,
+ 288,
+ 677
+ ],
+ "type": "text",
+ "content": " be the encoder of the frozen large language model, and context be the question."
+ }
+ ]
+ }
+ ],
+ "index": 4
+ },
+ {
+ "bbox": [
+ 96,
+ 687,
+ 287,
+ 715
+ ],
+ "type": "interline_equation",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 96,
+ 687,
+ 287,
+ 715
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 96,
+ 687,
+ 287,
+ 715
+ ],
+ "type": "interline_equation",
+ "content": "\\begin{array}{l} \\begin{array}{l} \\mathbf {E} _ {A} = \\mathcal {F} _ {\\psi} \\left(\\mathcal {F} _ {\\sigma} \\left(\\mathcal {F} _ {\\omega} (\\mathbf {I} _ {A}), \\text {c o n t e x t}\\right)\\right) \\\\ \\overline {{\\mathbf {E}}} = \\overline {{\\mathbf {F}}} \\left(\\overline {{\\mathbf {F}}} \\left(\\overline {{\\mathbf {F}}} (\\mathbf {I} _ {A}), \\text {c o n t e x t}\\right)\\right) \\end{array} \\tag {9} \\\\ \\mathbf {E} _ {B} = \\mathcal {F} _ {\\psi} (\\mathcal {F} _ {\\sigma} (\\mathcal {F} _ {\\omega} (\\mathbf {I} _ {B}), c o n t e x t)) \\\\ \\end{array}",
+ "image_path": "b34ab970d33aee8f7bb753524ed2943e6bbd35cbfe591157fb47a5c646835255.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 5
+ },
+ {
+ "bbox": [
+ 305,
+ 72,
+ 547,
+ 156
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 305,
+ 72,
+ 547,
+ 156
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 305,
+ 72,
+ 547,
+ 156
+ ],
+ "type": "text",
+ "content": "Fine-tuning with semantics consistency. As illustrated in Fig. 2, our approach aligns the latent semantic embeddings of both the source and target motions in an unsupervised manner, ensuring a high degree of semantic consistency in the retargeted results. The semantics consistency loss "
+ },
+ {
+ "bbox": [
+ 305,
+ 72,
+ 547,
+ 156
+ ],
+ "type": "inline_equation",
+ "content": "\\mathcal{L}_{sem}"
+ },
+ {
+ "bbox": [
+ 305,
+ 72,
+ 547,
+ 156
+ ],
+ "type": "text",
+ "content": " is calculated using the mean square error and it is defined as follows:"
+ }
+ ]
+ }
+ ],
+ "index": 6
+ },
+ {
+ "bbox": [
+ 365,
+ 165,
+ 545,
+ 190
+ ],
+ "type": "interline_equation",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 365,
+ 165,
+ 545,
+ 190
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 365,
+ 165,
+ 545,
+ 190
+ ],
+ "type": "interline_equation",
+ "content": "\\mathcal {L} _ {s e m} = \\sum_ {t} \\| \\mathbf {E} _ {A, t} - \\mathbf {E} _ {B, t} \\| _ {2} ^ {2} \\tag {10}",
+ "image_path": "7692a0363b9d59358c9841579c9894c7f98c1f5466796dc79bbd9726a48541f7.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 7
+ },
+ {
+ "bbox": [
+ 305,
+ 194,
+ 545,
+ 291
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 305,
+ 194,
+ 545,
+ 291
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 305,
+ 194,
+ 545,
+ 291
+ ],
+ "type": "text",
+ "content": "Fine-tuning with geometry constraints. From our observations, most interpenetration problems occur between the limbs and the main body. To address this, we incorporate the signed distance field between the limb vertices and the body mesh as the interpenetration loss. First, we convert the skeleton motion output from the network into mesh vertices using the linear blend skinning method [12]. Then, the interpenetration loss is defined as follows:"
+ }
+ ]
+ }
+ ],
+ "index": 8
+ },
+ {
+ "bbox": [
+ 357,
+ 300,
+ 545,
+ 324
+ ],
+ "type": "interline_equation",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 357,
+ 300,
+ 545,
+ 324
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 357,
+ 300,
+ 545,
+ 324
+ ],
+ "type": "interline_equation",
+ "content": "\\mathcal {L} _ {p e n} = \\sum_ {t} R e L U (- \\Phi_ {b, t} (\\mathbf {V} _ {l, t})) \\tag {11}",
+ "image_path": "00b7f8b3cc19efb240953f6ac1e7239067f6ca6047be97cb4449f7a89ba03e77.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 9
+ },
+ {
+ "bbox": [
+ 305,
+ 329,
+ 546,
+ 377
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 305,
+ 329,
+ 546,
+ 377
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 305,
+ 329,
+ 546,
+ 377
+ ],
+ "type": "text",
+ "content": "where "
+ },
+ {
+ "bbox": [
+ 305,
+ 329,
+ 546,
+ 377
+ ],
+ "type": "inline_equation",
+ "content": "\\Phi_b"
+ },
+ {
+ "bbox": [
+ 305,
+ 329,
+ 546,
+ 377
+ ],
+ "type": "text",
+ "content": " indicates the signed distance field function, "
+ },
+ {
+ "bbox": [
+ 305,
+ 329,
+ 546,
+ 377
+ ],
+ "type": "inline_equation",
+ "content": "\\mathbf{V}_l"
+ },
+ {
+ "bbox": [
+ 305,
+ 329,
+ 546,
+ 377
+ ],
+ "type": "text",
+ "content": " is the vertices of the limbs. If the vertex locates inside the body, the value of the function is less than zero. Therefore, we use the "
+ },
+ {
+ "bbox": [
+ 305,
+ 329,
+ 546,
+ 377
+ ],
+ "type": "inline_equation",
+ "content": "ReLU"
+ },
+ {
+ "bbox": [
+ 305,
+ 329,
+ 546,
+ 377
+ ],
+ "type": "text",
+ "content": " function to penalize the inner vertices."
+ }
+ ]
+ }
+ ],
+ "index": 10
+ },
+ {
+ "bbox": [
+ 306,
+ 388,
+ 388,
+ 402
+ ],
+ "type": "title",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 306,
+ 388,
+ 388,
+ 402
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 306,
+ 388,
+ 388,
+ 402
+ ],
+ "type": "text",
+ "content": "4. Experiments"
+ }
+ ]
+ }
+ ],
+ "index": 11
+ },
+ {
+ "bbox": [
+ 306,
+ 407,
+ 366,
+ 421
+ ],
+ "type": "title",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 306,
+ 407,
+ 366,
+ 421
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 306,
+ 407,
+ 366,
+ 421
+ ],
+ "type": "text",
+ "content": "4.1. Settings"
+ }
+ ]
+ }
+ ],
+ "index": 12
+ },
+ {
+ "bbox": [
+ 304,
+ 426,
+ 545,
+ 666
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 304,
+ 426,
+ 545,
+ 666
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 304,
+ 426,
+ 545,
+ 666
+ ],
+ "type": "text",
+ "content": "Datasets. We train and evaluate our method on the Mixamo dataset [1], an extensive repository of animations performed by various 3D virtual characters with distinct skeletons and geometry shapes. The training set we use to pretrain our skeleton aware module is the same as that used in [2], which contains 1646 motions performed by 7 characters. It's important to note that the Mixamo dataset does not provide clean ground truth data, since many of the motion sequences suffer from interpenetration issues and semantic information loss. To mitigate this, we have carefully selected a subset of motion sequences that are both semantically clean and free of interpenetration issues for fine-tuning and testing. Our fine-tuning process involves retargeting 15 clean motions including 3127 frames, originally performed by 3 source characters, namely \"Y Bot\", \"X Bot\", and \"Ortiz\", onto 3 target characters, including \"Aj\", \"Kaya\", and \"Mousey\". Then we evaluate the performance of our model on the task of retargeting 30 additional motions that are previously unseen in the training set and fine-tuning sets. More details could be found in the supplementary materials."
+ }
+ ]
+ }
+ ],
+ "index": 13
+ },
+ {
+ "bbox": [
+ 305,
+ 666,
+ 547,
+ 714
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 305,
+ 666,
+ 547,
+ 714
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 305,
+ 666,
+ 547,
+ 714
+ ],
+ "type": "text",
+ "content": "Implementation details. The hyper-parameters "
+ },
+ {
+ "bbox": [
+ 305,
+ 666,
+ 547,
+ 714
+ ],
+ "type": "inline_equation",
+ "content": "\\lambda_r, \\lambda_c, \\lambda_a, \\lambda_j, \\lambda_p, \\lambda_s"
+ },
+ {
+ "bbox": [
+ 305,
+ 666,
+ 547,
+ 714
+ ],
+ "type": "text",
+ "content": " for pre-training and fine-tuning loss functions are set to 10.0, 1.0, 0.1, 1.0, 1.0, 0.1. For semantics fine-tuning, we use BLIP-2 [14] with pre-trained FlanT5-XXL"
+ }
+ ]
+ }
+ ],
+ "index": 14
+ }
+ ],
+ "discarded_blocks": [
+ {
+ "bbox": [
+ 295,
+ 748,
+ 315,
+ 757
+ ],
+ "type": "page_number",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 295,
+ 748,
+ 315,
+ 757
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 295,
+ 748,
+ 315,
+ 757
+ ],
+ "type": "text",
+ "content": "2159"
+ }
+ ]
+ }
+ ],
+ "index": 15
+ }
+ ],
+ "page_size": [
+ 612,
+ 792
+ ],
+ "page_idx": 4
+ },
+ {
+ "para_blocks": [
+ {
+ "bbox": [
+ 46,
+ 72,
+ 286,
+ 167
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 46,
+ 72,
+ 286,
+ 167
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 46,
+ 72,
+ 286,
+ 167
+ ],
+ "type": "text",
+ "content": "[5] large language model. To extract the semantic representation of the motion, we render animation from three perspectives, including the front view, left view and right view. The fine-tuning process takes 25 epochs with 5 clean motion sequences of the source character for each target character. During pre-training and fine-tuning, we use an Adam optimizer to optimize the retargeting network. Please refer to the supplementary materials for more details."
+ }
+ ]
+ }
+ ],
+ "index": 0
+ },
+ {
+ "bbox": [
+ 46,
+ 168,
+ 288,
+ 350
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 46,
+ 168,
+ 288,
+ 350
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 46,
+ 168,
+ 288,
+ 350
+ ],
+ "type": "text",
+ "content": "Evaluation metrics. We evaluate the performance of our method across three key dimensions: skeleton, geometry, and semantics. At the skeletal level, we measure the Mean Square Error (MSE) between retargeted joint positions and the ground truth provided by Mixamo, analyzing both the global and the local joint positions. At the geometric level, we evaluate the interpenetration percentage (PEN). At the semantic level, we utilize the Image-Text Matching (ITM) score, Fréchet inception distance (FID) and semantics consistency loss (SCL) as metrics. The ITM score quantifies the visual-semantic similarity between the source textual description and the rendered retargeted motion. FID is calculated between the semantic embedding distribution of retargeted motion and source motion. More details are provided in the supplementary materials."
+ }
+ ]
+ }
+ ],
+ "index": 1
+ },
+ {
+ "bbox": [
+ 47,
+ 358,
+ 230,
+ 371
+ ],
+ "type": "title",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 47,
+ 358,
+ 230,
+ 371
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 47,
+ 358,
+ 230,
+ 371
+ ],
+ "type": "text",
+ "content": "4.2. Comparison with State of the Arts"
+ }
+ ]
+ }
+ ],
+ "index": 2
+ },
+ {
+ "bbox": [
+ 46,
+ 378,
+ 287,
+ 617
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 46,
+ 378,
+ 287,
+ 617
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 46,
+ 378,
+ 287,
+ 617
+ ],
+ "type": "text",
+ "content": "Quantitative. In this section, we conduct a comparative analysis of our method against the state-of-the-art approaches as illustrated in Tab. 1. The baseline methods include R2ET [25], SAN [2], NKN [22] and the Copy strategy. The Copy strategy achieves the lowest local MSE because the ground truth data in the Mixamo dataset are not entirely clean, and many of them are generated by copying rotations. As a result, this strategy comes at the cost of semantic loss and interpenetration issues. SAN [2] and NKN [22] focus on skeleton-level motion features, which results in a high interpenetration rate and relatively low semantics preservation. R2ET [25] treats motion semantics as the joint distance matrix and mesh distance field, which helps it obtain better motion semantics than SAN and Copy. Nevertheless, there is still a gap between the human-designed distance matrix and the human-level semantics. Notably, our model exhibits the best interpenetration rate and semantics preservation among all methods, showcasing the capability of the proposed method in producing high-quality retargeted motions with semantics consistency."
+ }
+ ]
+ }
+ ],
+ "index": 3
+ },
+ {
+ "bbox": [
+ 46,
+ 618,
+ 287,
+ 715
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 46,
+ 618,
+ 287,
+ 715
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 46,
+ 618,
+ 287,
+ 715
+ ],
+ "type": "text",
+ "content": "Qualitative. In Fig. 4, we visualize the text descriptions of the motions and the qualitative comparison between the state-of-the-arts and our method. SAN [2] and Copy neglect the preservation of semantics and have severe interpenetration. R2ET [25] utilizes joint distance matrix as semantics representation and fails to capture high-level semantic information. For example, the salute motion retargeted by R2ET [25] appears more like a hand-up motion. As a comparison,"
+ }
+ ]
+ }
+ ],
+ "index": 4
+ },
+ {
+ "type": "table",
+ "bbox": [
+ 305,
+ 70,
+ 545,
+ 164
+ ],
+ "blocks": [
+ {
+ "bbox": [
+ 305,
+ 70,
+ 545,
+ 164
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 305,
+ 70,
+ 545,
+ 164
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 305,
+ 70,
+ 545,
+ 164
+ ],
+ "type": "table",
 "html": "\n| Method | MSE ↓ | MSElc ↓ | Pen.% ↓ | ITM ↑ | FID ↓ | SCL ↓ |\n| Source | - | - | 4.43 | 0.796 | - | - |\n| GT | - | - | 9.06 | 0.582 | 26.99 | 1.331 |\n| Copy | - | 0.005 | 9.03 | 0.581 | 26.58 | 1.327 |\n| NKN [22] | 0.326 | 0.231 | 8.71 | 0.575 | 27.79 | 1.414 |\n| SAN [2] | 0.435 | 0.255 | 9.74 | 0.561 | 28.33 | 1.448 |\n| R2ET [25] | 0.499 | 0.496 | 7.62 | 0.643 | 5.469 | 0.405 |\n| Ours | 0.284 | 0.229 | 3.50 | 0.680 | 0.436 | 0.143 |\n",
+ "image_path": "2b82f0a5a5983c28b764dadc23600be325d82a9c9e9245e1eafee3844373281c.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 5,
+ "angle": 0,
+ "type": "table_body"
+ }
+ ],
+ "index": 5
+ },
+ {
+ "type": "table",
+ "bbox": [
+ 306,
+ 217,
+ 545,
+ 309
+ ],
+ "blocks": [
+ {
+ "bbox": [
+ 305,
+ 167,
+ 545,
+ 212
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 305,
+ 167,
+ 545,
+ 212
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 305,
+ 167,
+ 545,
+ 212
+ ],
+ "type": "text",
+ "content": "Table 1. Quantitative comparison with the state-of-the-arts. "
+ },
+ {
+ "bbox": [
+ 305,
+ 167,
+ 545,
+ 212
+ ],
+ "type": "inline_equation",
+ "content": "\\mathrm{MSE}^{lc}"
+ },
+ {
+ "bbox": [
+ 305,
+ 167,
+ 545,
+ 212
+ ],
+ "type": "text",
+ "content": " denotes the local MSE. ITM indicates the image-text matching score. FID is Fréchet inception distance of motion semantics. SCL is the semantics consistency loss."
+ }
+ ]
+ }
+ ],
+ "index": 6,
+ "angle": 0,
+ "type": "table_caption"
+ },
+ {
+ "bbox": [
+ 306,
+ 217,
+ 545,
+ 309
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 306,
+ 217,
+ 545,
+ 309
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 306,
+ 217,
+ 545,
+ 309
+ ],
+ "type": "table",
 "html": "| Method | MSE ↓ | MSElc ↓ | Pen.% ↓ | ITM ↑ | FID ↓ | SCL ↓ |\n| SMTtws | 0.248 | 0.129 | 8.37 | 0.586 | 7.727 | 0.769 |\n| SMTtwf | 7.798 | 7.083 | 0.44 | 0.432 | 56.53 | 13.29 |\n| SMTtwa | 0.335 | 0.288 | 5.36 | 0.658 | 2.826 | 0.266 |\n| SMTfwp | 0.439 | 0.368 | 1.22 | 0.597 | 7.241 | 0.583 |\n| SMTfwi | 5.418 | 4.576 | 4.41 | 0.552 | 78.46 | 18.96 |\n| SMTfwq | 0.739 | 0.517 | 4.56 | 0.668 | 2.497 | 0.191 |\n| SMTOurs | 0.284 | 0.229 | 3.50 | 0.680 | 0.436 | 0.143 |\n",
+ "image_path": "3f37e06acad8ecddeb20ed992af0b49d22e67417caca59727deb5a925f0362a9.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 7,
+ "angle": 0,
+ "type": "table_body"
+ }
+ ],
+ "index": 7
+ },
+ {
+ "bbox": [
+ 305,
+ 312,
+ 545,
+ 389
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 305,
+ 312,
+ 545,
+ 389
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 305,
+ 312,
+ 545,
+ 389
+ ],
+ "type": "text",
+ "content": "Table 2. Ablation study. "
+ },
+ {
+ "bbox": [
+ 305,
+ 312,
+ 545,
+ 389
+ ],
+ "type": "inline_equation",
+ "content": "\\mathrm{SMT}_{tws}"
+ },
+ {
+ "bbox": [
+ 305,
+ 312,
+ 545,
+ 389
+ ],
+ "type": "text",
+ "content": " is the network trained with only skeleton-aware pre-training. "
+ },
+ {
+ "bbox": [
+ 305,
+ 312,
+ 545,
+ 389
+ ],
+ "type": "inline_equation",
+ "content": "\\mathrm{SMT}_{twf}"
+ },
+ {
+ "bbox": [
+ 305,
+ 312,
+ 545,
+ 389
+ ],
+ "type": "text",
+ "content": " is the network trained with only semantics and geometry fine-tuning. "
+ },
+ {
+ "bbox": [
+ 305,
+ 312,
+ 545,
+ 389
+ ],
+ "type": "inline_equation",
+ "content": "\\mathrm{SMT}_{twa}"
+ },
+ {
+ "bbox": [
+ 305,
+ 312,
+ 545,
+ 389
+ ],
+ "type": "text",
+ "content": " is the network trained in one stage. "
+ },
+ {
+ "bbox": [
+ 305,
+ 312,
+ 545,
+ 389
+ ],
+ "type": "inline_equation",
+ "content": "\\mathrm{SMT}_{fwp}"
+ },
+ {
+ "bbox": [
+ 305,
+ 312,
+ 545,
+ 389
+ ],
+ "type": "text",
+ "content": " is the network fine-tuned with only the interpenetration loss. "
+ },
+ {
+ "bbox": [
+ 305,
+ 312,
+ 545,
+ 389
+ ],
+ "type": "inline_equation",
+ "content": "\\mathrm{SMT}_{fwi}"
+ },
+ {
+ "bbox": [
+ 305,
+ 312,
+ 545,
+ 389
+ ],
+ "type": "text",
+ "content": " is the network fine-tuned with image features. "
+ },
+ {
+ "bbox": [
+ 305,
+ 312,
+ 545,
+ 389
+ ],
+ "type": "inline_equation",
+ "content": "\\mathrm{SMT}_{fwq}"
+ },
+ {
+ "bbox": [
+ 305,
+ 312,
+ 545,
+ 389
+ ],
+ "type": "text",
+ "content": " is the network fine-tuned with the features of the querying transformer."
+ }
+ ]
+ }
+ ],
+ "index": 8,
+ "angle": 0,
+ "type": "text"
+ },
+ {
+ "bbox": [
+ 304,
+ 397,
+ 545,
+ 494
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 304,
+ 397,
+ 545,
+ 494
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 304,
+ 397,
+ 545,
+ 494
+ ],
+ "type": "text",
+ "content": "our method is able to successfully preserve high-level motion semantics leveraging the vision-language model. We observe that our approach reaches the best results among all methods, achieving more reliable semantics preservation and lower interpenetration rates. It suggests that with semantics and geometry fine-tuning, our method could effectively solve interpenetration issues together with semantics preservation."
+ }
+ ]
+ }
+ ],
+ "index": 9
+ },
+ {
+ "bbox": [
+ 306,
+ 502,
+ 406,
+ 514
+ ],
+ "type": "title",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 306,
+ 502,
+ 406,
+ 514
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 306,
+ 502,
+ 406,
+ 514
+ ],
+ "type": "text",
+ "content": "4.3. Ablation Studies"
+ }
+ ]
+ }
+ ],
+ "index": 10
+ },
+ {
+ "bbox": [
+ 304,
+ 521,
+ 545,
+ 653
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 304,
+ 521,
+ 545,
+ 653
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 304,
+ 521,
+ 545,
+ 653
+ ],
+ "type": "text",
+ "content": "Skeleton-aware pre-training. The proposed method can be divided into two stage: pre-training and fine-tuning. To illustrate the importance of skeleton-aware pre-training, we evaluate the network trained with only the semantics consistency loss and the interpenetration loss in Tab. 2, denoted as "
+ },
+ {
+ "bbox": [
+ 304,
+ 521,
+ 545,
+ 653
+ ],
+ "type": "inline_equation",
+ "content": "\\mathrm{SMT}_{twf}"
+ },
+ {
+ "bbox": [
+ 304,
+ 521,
+ 545,
+ 653
+ ],
+ "type": "text",
+ "content": ". The network trained without skeleton-aware pretraining performs worst in MSE and semantics preservation. A reasonable explanation is that the semantics consistency loss is highly non-linear, so it is important to pre-train the network at the skeletal level to provide better initial values. We also visualize qualitative results in Fig. 5."
+ }
+ ]
+ }
+ ],
+ "index": 11
+ },
+ {
+ "bbox": [
+ 304,
+ 653,
+ 545,
+ 715
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 304,
+ 653,
+ 545,
+ 715
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 304,
+ 653,
+ 545,
+ 715
+ ],
+ "type": "text",
+ "content": "Semantics & geometry fine-tuning. We also conduct ablation study to illustrate the importance of semantics and geometry fine-tuning in Tab. 2. We first evaluate the performance of the skeleton-aware model without fine-tuning, denoted as "
+ },
+ {
+ "bbox": [
+ 304,
+ 653,
+ 545,
+ 715
+ ],
+ "type": "inline_equation",
+ "content": "\\mathrm{SMT}_{tws}"
+ },
+ {
+ "bbox": [
+ 304,
+ 653,
+ 545,
+ 715
+ ],
+ "type": "text",
+ "content": ". Though it reaches the best global posi"
+ }
+ ]
+ }
+ ],
+ "index": 12
+ }
+ ],
+ "discarded_blocks": [
+ {
+ "bbox": [
+ 295,
+ 748,
+ 315,
+ 757
+ ],
+ "type": "page_number",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 295,
+ 748,
+ 315,
+ 757
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 295,
+ 748,
+ 315,
+ 757
+ ],
+ "type": "text",
+ "content": "2160"
+ }
+ ]
+ }
+ ],
+ "index": 13
+ }
+ ],
+ "page_size": [
+ 612,
+ 792
+ ],
+ "page_idx": 5
+ },
+ {
+ "para_blocks": [
+ {
+ "type": "image",
+ "bbox": [
+ 47,
+ 74,
+ 547,
+ 317
+ ],
+ "blocks": [
+ {
+ "bbox": [
+ 47,
+ 74,
+ 547,
+ 317
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 47,
+ 74,
+ 547,
+ 317
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 47,
+ 74,
+ 547,
+ 317
+ ],
+ "type": "image",
+ "image_path": "67d286d86772b680eeddbe7dbdd2a2bf855b305691d26c3b7e03dbdc18835f4f.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 0,
+ "angle": 0,
+ "type": "image_body"
+ },
+ {
+ "bbox": [
+ 46,
+ 323,
+ 546,
+ 357
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 46,
+ 323,
+ 546,
+ 357
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 46,
+ 323,
+ 546,
+ 357
+ ],
+ "type": "text",
+ "content": "Figure 4. Qualitative comparison. The results demonstrate that our method can effectively preserve semantics while the baseline methods suffer from interpenetration or semantic information loss. From the first column to the last column are the source motion, the Copy strategy, NKN [22], SAN [2], R2ET [25], our method and text descriptions, respectively."
+ }
+ ]
+ }
+ ],
+ "index": 1,
+ "angle": 0,
+ "type": "image_caption"
+ }
+ ],
+ "index": 0
+ },
+ {
+ "type": "image",
+ "bbox": [
+ 58,
+ 365,
+ 274,
+ 529
+ ],
+ "blocks": [
+ {
+ "bbox": [
+ 58,
+ 365,
+ 274,
+ 529
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 58,
+ 365,
+ 274,
+ 529
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 58,
+ 365,
+ 274,
+ 529
+ ],
+ "type": "image",
+ "image_path": "2cefa79331f00b4de3501751f6fd2d2bc1d78bc2156f2a7c10e76f96b04c4e6d.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 2,
+ "angle": 0,
+ "type": "image_body"
+ },
+ {
+ "bbox": [
+ 46,
+ 533,
+ 287,
+ 588
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 46,
+ 533,
+ 287,
+ 588
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 46,
+ 533,
+ 287,
+ 588
+ ],
+ "type": "text",
+ "content": "Figure 5. The qualitative comparison of ablation study between the network without fine-tuning (TWS), the network trained with only semantics and geometry fine-tuning (TWF), the network trained with all loss functions (TWA), the network fine-tuned with only the interpenetration loss (FWP) and our full model (All)."
+ }
+ ]
+ }
+ ],
+ "index": 3,
+ "angle": 0,
+ "type": "image_caption"
+ }
+ ],
+ "index": 2
+ },
+ {
+ "bbox": [
+ 46,
+ 594,
+ 289,
+ 713
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 46,
+ 594,
+ 289,
+ 713
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 46,
+ 594,
+ 289,
+ 713
+ ],
+ "type": "text",
+ "content": "tion MSE, it suffers from interpenetration and semantic information loss because of the low-quality motion data provided by Mixamo. We next evaluate the network fine-tuned with only the interpenetration loss, denoted as "
+ },
+ {
+ "bbox": [
+ 46,
+ 594,
+ 289,
+ 713
+ ],
+ "type": "inline_equation",
+ "content": "\\mathrm{SMT}_{fwp}"
+ },
+ {
+ "bbox": [
+ 46,
+ 594,
+ 289,
+ 713
+ ],
+ "type": "text",
+ "content": ". This version results in a significant boost in terms of penetration rate. However, the gradient of interpenetration loss is only relevant with the face normals of the geometry mesh without considering the semantic information conveyed in the motion. It indicates the importance of the semantic consistency loss that makes the network reach a better balance"
+ }
+ ]
+ }
+ ],
+ "index": 4
+ },
+ {
+ "bbox": [
+ 304,
+ 367,
+ 545,
+ 475
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 304,
+ 367,
+ 545,
+ 475
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 304,
+ 367,
+ 545,
+ 475
+ ],
+ "type": "text",
+ "content": "between interpenetration and semantics. We also try to train the network with all loss functions in one stage, denoted as "
+ },
+ {
+ "bbox": [
+ 304,
+ 367,
+ 545,
+ 475
+ ],
+ "type": "inline_equation",
+ "content": "\\mathrm{SMT}_{twa}"
+ },
+ {
+ "bbox": [
+ 304,
+ 367,
+ 545,
+ 475
+ ],
+ "type": "text",
+ "content": ". However, it is challenging for the model to acquire general knowledge of interpenetration and semantics that is suitable for every character with limited data. Therefore, training the model with skeleton-aware pre-training and fine-tuning it with semantics consistency and geometry constraints for each target character remains a more reasonable and data-efficient strategy."
+ }
+ ]
+ }
+ ],
+ "index": 5
+ },
+ {
+ "bbox": [
+ 304,
+ 480,
+ 546,
+ 660
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 304,
+ 480,
+ 546,
+ 660
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 304,
+ 480,
+ 546,
+ 660
+ ],
+ "type": "text",
+ "content": "Latent semantic embedding. The vision-language model used for semantic extraction can be divided into three parts: the image encoder from CLIP [19], the querying transformer and the large language model. In Tab. 2, we compare the feature outputted by the image encoder, the querying transformer and the encoder of the large language model, denoted as "
+ },
+ {
+ "bbox": [
+ 304,
+ 480,
+ 546,
+ 660
+ ],
+ "type": "inline_equation",
+ "content": "\\mathrm{SMT}_{fwi}"
+ },
+ {
+ "bbox": [
+ 304,
+ 480,
+ 546,
+ 660
+ ],
+ "type": "text",
+ "content": ", "
+ },
+ {
+ "bbox": [
+ 304,
+ 480,
+ 546,
+ 660
+ ],
+ "type": "inline_equation",
+ "content": "\\mathrm{SMT}_{fwq}"
+ },
+ {
+ "bbox": [
+ 304,
+ 480,
+ 546,
+ 660
+ ],
+ "type": "text",
+ "content": ", and "
+ },
+ {
+ "bbox": [
+ 304,
+ 480,
+ 546,
+ 660
+ ],
+ "type": "inline_equation",
+ "content": "\\mathrm{SMT}_{Ours}"
+ },
+ {
+ "bbox": [
+ 304,
+ 480,
+ 546,
+ 660
+ ],
+ "type": "text",
+ "content": ", respectively. The results show that the image feature performs worse since it is greatly affected by the appearance of the character. It indicates that with the help of the large language model, the semantic representation better focuses on the semantic meaning of the motion instead of the character's visual appearance. Therefore, the encoder output of the large language model is more suitable for semantic embedding. More details can be found in the supplementary materials."
+ }
+ ]
+ }
+ ],
+ "index": 6
+ },
+ {
+ "bbox": [
+ 304,
+ 665,
+ 545,
+ 715
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 304,
+ 665,
+ 545,
+ 715
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 304,
+ 665,
+ 545,
+ 715
+ ],
+ "type": "text",
+ "content": "Prompt design. To validate the importance of guiding visual question answering, we compare the textual descriptions generated by visual question answering with and without guiding questions as well as image captioning. The re"
+ }
+ ]
+ }
+ ],
+ "index": 7
+ }
+ ],
+ "discarded_blocks": [
+ {
+ "bbox": [
+ 295,
+ 748,
+ 314,
+ 757
+ ],
+ "type": "page_number",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 295,
+ 748,
+ 314,
+ 757
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 295,
+ 748,
+ 314,
+ 757
+ ],
+ "type": "text",
+ "content": "2161"
+ }
+ ]
+ }
+ ],
+ "index": 8
+ }
+ ],
+ "page_size": [
+ 612,
+ 792
+ ],
+ "page_idx": 6
+ },
+ {
+ "para_blocks": [
+ {
+ "type": "image",
+ "bbox": [
+ 49,
+ 71,
+ 113,
+ 135
+ ],
+ "blocks": [
+ {
+ "bbox": [
+ 49,
+ 71,
+ 113,
+ 135
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 49,
+ 71,
+ 113,
+ 135
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 49,
+ 71,
+ 113,
+ 135
+ ],
+ "type": "image",
+ "image_path": "06dd6b0b0ab33fa1a55868a80afc687cc42688a78a6088cc89698e1acbf6ac3d.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 0,
+ "angle": 0,
+ "type": "image_body"
+ },
+ {
+ "bbox": [
+ 118,
+ 77,
+ 154,
+ 83
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 118,
+ 77,
+ 154,
+ 83
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 118,
+ 77,
+ 154,
+ 83
+ ],
+ "type": "text",
+ "content": "Image Captioning"
+ }
+ ]
+ }
+ ],
+ "index": 1,
+ "angle": 0,
+ "type": "image_caption"
+ },
+ {
+ "bbox": [
+ 118,
+ 84,
+ 176,
+ 97
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 118,
+ 84,
+ 176,
+ 97
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 118,
+ 84,
+ 176,
+ 97
+ ],
+ "type": "text",
+ "content": "A 3d model of a boy wearing glasses and a hat."
+ }
+ ]
+ }
+ ],
+ "index": 2,
+ "angle": 0,
+ "type": "image_footnote"
+ },
+ {
+ "bbox": [
+ 118,
+ 110,
+ 171,
+ 116
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 118,
+ 110,
+ 171,
+ 116
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 118,
+ 110,
+ 171,
+ 116
+ ],
+ "type": "text",
+ "content": "Visual Question Answering"
+ }
+ ]
+ }
+ ],
+ "index": 5,
+ "angle": 0,
+ "type": "image_caption"
+ },
+ {
+ "bbox": [
+ 118,
+ 117,
+ 183,
+ 123
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 118,
+ 117,
+ 183,
+ 123
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 118,
+ 117,
+ 183,
+ 123
+ ],
+ "type": "text",
+ "content": "Q: What is the character doing?"
+ }
+ ]
+ }
+ ],
+ "index": 6,
+ "angle": 0,
+ "type": "image_footnote"
+ },
+ {
+ "bbox": [
+ 118,
+ 123,
+ 174,
+ 130
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 118,
+ 123,
+ 174,
+ 130
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 118,
+ 123,
+ 174,
+ 130
+ ],
+ "type": "text",
+ "content": "A: The character is praying."
+ }
+ ]
+ }
+ ],
+ "index": 7,
+ "angle": 0,
+ "type": "image_footnote"
+ }
+ ],
+ "index": 0
+ },
+ {
+ "type": "image",
+ "bbox": [
+ 48,
+ 140,
+ 113,
+ 205
+ ],
+ "blocks": [
+ {
+ "bbox": [
+ 48,
+ 140,
+ 113,
+ 205
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 48,
+ 140,
+ 113,
+ 205
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 48,
+ 140,
+ 113,
+ 205
+ ],
+ "type": "image",
+ "image_path": "89686dcf50707ed7e14930dbdac4e66d787a29b9ad437e6d61383caac35da9e3.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 14,
+ "angle": 0,
+ "type": "image_body"
+ },
+ {
+ "bbox": [
+ 118,
+ 145,
+ 154,
+ 152
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 118,
+ 145,
+ 154,
+ 152
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 118,
+ 145,
+ 154,
+ 152
+ ],
+ "type": "text",
+ "content": "Image Captioning"
+ }
+ ]
+ }
+ ],
+ "index": 15,
+ "angle": 0,
+ "type": "image_caption"
+ },
+ {
+ "bbox": [
+ 118,
+ 152,
+ 187,
+ 165
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 118,
+ 152,
+ 187,
+ 165
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 118,
+ 152,
+ 187,
+ 165
+ ],
+ "type": "text",
+ "content": "A 3d model of a robot running on a cheeked floor."
+ }
+ ]
+ }
+ ],
+ "index": 16,
+ "angle": 0,
+ "type": "image_footnote"
+ },
+ {
+ "bbox": [
+ 118,
+ 176,
+ 171,
+ 182
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 118,
+ 176,
+ 171,
+ 182
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 118,
+ 176,
+ 171,
+ 182
+ ],
+ "type": "text",
+ "content": "Visual Question Answering"
+ }
+ ]
+ }
+ ],
+ "index": 17,
+ "angle": 0,
+ "type": "image_caption"
+ },
+ {
+ "bbox": [
+ 118,
+ 183,
+ 184,
+ 203
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 118,
+ 183,
+ 184,
+ 203
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 118,
+ 183,
+ 184,
+ 203
+ ],
+ "type": "text",
+ "content": "Q: What is the character doing? \nA: The character is running on a checkered floor."
+ }
+ ]
+ }
+ ],
+ "index": 18,
+ "angle": 0,
+ "type": "image_footnote"
+ },
+ {
+ "bbox": [
+ 47,
+ 211,
+ 287,
+ 233
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 47,
+ 211,
+ 287,
+ 233
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 47,
+ 211,
+ 287,
+ 233
+ ],
+ "type": "text",
+ "content": "Figure 6. Text descriptions generated by different ways. The guiding visual question answering yields more comprehensive results."
+ }
+ ]
+ }
+ ],
+ "index": 19,
+ "angle": 0,
+ "type": "image_caption"
+ }
+ ],
+ "index": 14
+ },
+ {
+ "type": "table",
+ "bbox": [
+ 50,
+ 239,
+ 280,
+ 323
+ ],
+ "blocks": [
+ {
+ "bbox": [
+ 50,
+ 239,
+ 280,
+ 323
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 50,
+ 239,
+ 280,
+ 323
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 50,
+ 239,
+ 280,
+ 323
+ ],
+ "type": "table",
 "html": "| Method | Quality ↑ | Smoothness ↑ | Semantics ↑ |\n| Copy | 0.72 | 0.86 | 0.71 |\n| NKN [22] | 0.65 | 0.80 | 0.66 |\n| SAN [2] | 0.69 | 0.82 | 0.67 |\n| R2ET [25] | 0.80 | 0.61 | 0.85 |\n| Ours | 0.89 | 0.80 | 0.92 |\n",
+ "image_path": "2ac2256d22a7e955f6a30ba90a569076e2396410284e0fcb87ca4c00023820b5.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 20,
+ "angle": 0,
+ "type": "table_body"
+ },
+ {
+ "bbox": [
+ 47,
+ 326,
+ 287,
+ 359
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 47,
+ 326,
+ 287,
+ 359
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 47,
+ 326,
+ 287,
+ 359
+ ],
+ "type": "text",
+ "content": "Table 3. User study results. We collect 100 comparisons in three aspects. Our method gets highest scores in the overall quality as well as semantics preservation."
+ }
+ ]
+ }
+ ],
+ "index": 21,
+ "angle": 0,
+ "type": "table_footnote"
+ }
+ ],
+ "index": 20
+ },
+ {
+ "bbox": [
+ 46,
+ 363,
+ 287,
+ 447
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 46,
+ 363,
+ 287,
+ 447
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 46,
+ 363,
+ 287,
+ 447
+ ],
+ "type": "text",
+ "content": "sults in Fig. 6 indicate that using guiding questions for visual question answering yields the most comprehensive and reasonable text descriptions for motion semantics. Compared with image captioning that uses the vision-language model to generate text description directly from images, the answers from visual question answering task can be guided by the designed question to focus on motion semantics."
+ }
+ ]
+ }
+ ],
+ "index": 22
+ },
+ {
+ "bbox": [
+ 47,
+ 454,
+ 122,
+ 467
+ ],
+ "type": "title",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 47,
+ 454,
+ 122,
+ 467
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 47,
+ 454,
+ 122,
+ 467
+ ],
+ "type": "text",
+ "content": "4.4. User Study"
+ }
+ ]
+ }
+ ],
+ "index": 23
+ },
+ {
+ "bbox": [
+ 46,
+ 472,
+ 287,
+ 616
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 46,
+ 472,
+ 287,
+ 616
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 46,
+ 472,
+ 287,
+ 616
+ ],
+ "type": "text",
+ "content": "We conduct a user study to evaluate the performance of our method against the baseline methods. Human subjects are given 12 videos. Each video includes one source skinned motion and five anonymous skinned results. The retargeted results are randomly placed. We ask subjects to rate the results out of 1.0 in three aspects: overall quality, motion smoothness and semantics preservation. We collect a total of 100 comparisons. During the evaluation, users are required to extract semantic meaning from the source motion themselves and then evaluate the preservation of retargeted motions. In general, more than "
+ },
+ {
+ "bbox": [
+ 46,
+ 472,
+ 287,
+ 616
+ ],
+ "type": "inline_equation",
+ "content": "92\\%"
+ },
+ {
+ "bbox": [
+ 46,
+ 472,
+ 287,
+ 616
+ ],
+ "type": "text",
+ "content": " of subjects prefer the retargeting results of our method."
+ }
+ ]
+ }
+ ],
+ "index": 24
+ },
+ {
+ "bbox": [
+ 47,
+ 623,
+ 260,
+ 635
+ ],
+ "type": "title",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 47,
+ 623,
+ 260,
+ 635
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 47,
+ 623,
+ 260,
+ 635
+ ],
+ "type": "text",
+ "content": "4.5. Retargeting Motion from Human Videos"
+ }
+ ]
+ }
+ ],
+ "index": 25
+ },
+ {
+ "bbox": [
+ 46,
+ 642,
+ 287,
+ 712
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 46,
+ 642,
+ 287,
+ 712
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 46,
+ 642,
+ 287,
+ 712
+ ],
+ "type": "text",
+ "content": "In this section, we evaluate our motion retargeting approach from human videos in the human3.6M [10] dataset. Video retargeting involves two stages: human pose estimation from video and motion retargeting. However, inaccuracies in estimating body postures may result in semantic information loss and thus accumulation of errors in the entire"
+ }
+ ]
+ }
+ ],
+ "index": 26
+ },
+ {
+ "type": "image",
+ "bbox": [
+ 318,
+ 71,
+ 388,
+ 140
+ ],
+ "blocks": [
+ {
+ "bbox": [
+ 198,
+ 84,
+ 268,
+ 90
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 198,
+ 84,
+ 268,
+ 90
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 198,
+ 84,
+ 268,
+ 90
+ ],
+ "type": "text",
+ "content": "Guiding Visual Question Answering"
+ }
+ ]
+ }
+ ],
+ "index": 3,
+ "angle": 0,
+ "type": "image_caption"
+ },
+ {
+ "bbox": [
+ 198,
+ 90,
+ 283,
+ 95
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 198,
+ 90,
+ 283,
+ 95
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 198,
+ 90,
+ 283,
+ 95
+ ],
+ "type": "text",
+ "content": "Q: Where are the hands of the character?"
+ }
+ ]
+ }
+ ],
+ "index": 4,
+ "angle": 0,
+ "type": "image_footnote"
+ },
+ {
+ "bbox": [
+ 318,
+ 71,
+ 388,
+ 140
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 318,
+ 71,
+ 388,
+ 140
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 318,
+ 71,
+ 388,
+ 140
+ ],
+ "type": "image",
+ "image_path": "31a5234b3dd525a192b0a0e30eebda6fa48e4919aaca3bb3d5291bf467c490ce.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 27,
+ "angle": 0,
+ "type": "image_body"
+ }
+ ],
+ "index": 27
+ },
+ {
+ "type": "image",
+ "bbox": [
+ 390,
+ 71,
+ 459,
+ 140
+ ],
+ "blocks": [
+ {
+ "bbox": [
+ 390,
+ 71,
+ 459,
+ 140
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 390,
+ 71,
+ 459,
+ 140
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 390,
+ 71,
+ 459,
+ 140
+ ],
+ "type": "image",
+ "image_path": "6b583f7206f29c0b1fc775e655fae1efd11ecf2e6f701ff2617c580822706bd8.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 28,
+ "angle": 0,
+ "type": "image_body"
+ }
+ ],
+ "index": 28
+ },
+ {
+ "type": "image",
+ "bbox": [
+ 462,
+ 71,
+ 531,
+ 140
+ ],
+ "blocks": [
+ {
+ "bbox": [
+ 462,
+ 71,
+ 531,
+ 140
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 462,
+ 71,
+ 531,
+ 140
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 462,
+ 71,
+ 531,
+ 140
+ ],
+ "type": "image",
+ "image_path": "8f56ffaf23669bc2d89939f221262149d7058f8c5b811f3796ffd00f378fcaf0.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 29,
+ "angle": 0,
+ "type": "image_body"
+ }
+ ],
+ "index": 29
+ },
+ {
+ "type": "image",
+ "bbox": [
+ 318,
+ 142,
+ 388,
+ 212
+ ],
+ "blocks": [
+ {
+ "bbox": [
+ 198,
+ 152,
+ 268,
+ 158
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 198,
+ 152,
+ 268,
+ 158
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 198,
+ 152,
+ 268,
+ 158
+ ],
+ "type": "text",
+ "content": "Guiding Visual Question Answering"
+ }
+ ]
+ }
+ ],
+ "index": 9,
+ "angle": 0,
+ "type": "image_caption"
+ },
+ {
+ "bbox": [
+ 198,
+ 159,
+ 283,
+ 170
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 198,
+ 159,
+ 283,
+ 170
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 198,
+ 159,
+ 283,
+ 170
+ ],
+ "type": "text",
+ "content": "Q: Where are the hands of the character? A: Holding a ball."
+ }
+ ]
+ }
+ ],
+ "index": 10,
+ "angle": 0,
+ "type": "image_footnote"
+ },
+ {
+ "bbox": [
+ 198,
+ 170,
+ 264,
+ 175
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 198,
+ 170,
+ 264,
+ 175
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 198,
+ 170,
+ 264,
+ 175
+ ],
+ "type": "text",
+ "content": "Q: What is the character doing?"
+ }
+ ]
+ }
+ ],
+ "index": 11,
+ "angle": 0,
+ "type": "image_footnote"
+ },
+ {
+ "bbox": [
+ 198,
+ 175,
+ 280,
+ 193
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 198,
+ 175,
+ 280,
+ 193
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 198,
+ 175,
+ 280,
+ 193
+ ],
+ "type": "text",
+ "content": "A: The character is trying to throw a ball with both hands on the right side of his body."
+ }
+ ]
+ }
+ ],
+ "index": 12,
+ "angle": 0,
+ "type": "image_footnote"
+ },
+ {
+ "bbox": [
+ 318,
+ 142,
+ 388,
+ 212
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 318,
+ 142,
+ 388,
+ 212
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 318,
+ 142,
+ 388,
+ 212
+ ],
+ "type": "image",
+ "image_path": "90c070f166b3ff245499e227a911f1815b1940b8b7385812cb83bc7354f8103a.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 30,
+ "angle": 0,
+ "type": "image_body"
+ },
+ {
+ "bbox": [
+ 306,
+ 216,
+ 545,
+ 249
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 306,
+ 216,
+ 545,
+ 249
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 306,
+ 216,
+ 545,
+ 249
+ ],
+ "type": "text",
+ "content": "Figure 7. We retarget from human motion clips in the human3.6M [10] dataset. The retargeted motions are free from interpenetration and preserve semantics well."
+ }
+ ]
+ }
+ ],
+ "index": 33,
+ "angle": 0,
+ "type": "image_caption"
+ }
+ ],
+ "index": 30
+ },
+ {
+ "type": "image",
+ "bbox": [
+ 390,
+ 143,
+ 459,
+ 212
+ ],
+ "blocks": [
+ {
+ "bbox": [
+ 390,
+ 143,
+ 459,
+ 212
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 390,
+ 143,
+ 459,
+ 212
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 390,
+ 143,
+ 459,
+ 212
+ ],
+ "type": "image",
+ "image_path": "6e07af2e5d7ced8a5d3c5c66f78180d8a2e50126bd1e2e0cee03ada486231a7d.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 31,
+ "angle": 0,
+ "type": "image_body"
+ }
+ ],
+ "index": 31
+ },
+ {
+ "type": "image",
+ "bbox": [
+ 462,
+ 143,
+ 531,
+ 212
+ ],
+ "blocks": [
+ {
+ "bbox": [
+ 462,
+ 143,
+ 531,
+ 212
+ ],
+ "lines": [
+ {
+ "bbox": [
+ 462,
+ 143,
+ 531,
+ 212
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 462,
+ 143,
+ 531,
+ 212
+ ],
+ "type": "image",
+ "image_path": "1be675d4ea3fdae8b9bd00e2d57b8dc35130e1f7e72e0afc87a8a71c3a69667a.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 32,
+ "angle": 0,
+ "type": "image_body"
+ }
+ ],
+ "index": 32
+ },
+ {
+ "bbox": [
+ 305,
+ 252,
+ 545,
+ 335
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 305,
+ 252,
+ 545,
+ 335
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 305,
+ 252,
+ 545,
+ 335
+ ],
+ "type": "text",
+ "content": "retargeting process. Therefore, we first get the estimated human pose from [17]. Then we utilize the vision-language model to extract the semantic embedding of the original video and calculate the semantic consistency loss to optimize the joint angles acquired from the retargeting process directly. In Fig. 7, we show our results of motion retargeting from human videos to Mixamo characters."
+ }
+ ]
+ }
+ ],
+ "index": 34
+ },
+ {
+ "bbox": [
+ 306,
+ 346,
+ 383,
+ 358
+ ],
+ "type": "title",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 306,
+ 346,
+ 383,
+ 358
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 306,
+ 346,
+ 383,
+ 358
+ ],
+ "type": "text",
+ "content": "5. Conclusions"
+ }
+ ]
+ }
+ ],
+ "index": 35
+ },
+ {
+ "bbox": [
+ 304,
+ 366,
+ 545,
+ 498
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 304,
+ 366,
+ 545,
+ 498
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 304,
+ 366,
+ 545,
+ 498
+ ],
+ "type": "text",
+ "content": "In this paper, we present a novel semantics-aware motion retargeting method that leverages the capabilities of vision-language models to extract semantic embeddings and facilitate the preservation of motion semantics. This approach offers a promising solution to the challenge of lacking labelled semantic data for motion. Our proposed method involves a two-stage process that integrates skeleton-level motion characteristics and semantics-level consistency along with geometry constraints. Experimental results demonstrate that our approach excels in generating high-quality retargeted motions with semantics consistency."
+ }
+ ]
+ }
+ ],
+ "index": 36
+ },
+ {
+ "bbox": [
+ 304,
+ 498,
+ 545,
+ 605
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 304,
+ 498,
+ 545,
+ 605
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 304,
+ 498,
+ 545,
+ 605
+ ],
+ "type": "text",
+ "content": "Limitations. The main limitation is the performance of the vision-language model in extracting motion semantics. Without the support of motion semantic datasets of sufficient data size and quality, we rely on the model pre-trained on large image-text datasets. Although the model achieves some remarkable results in motion semantics extraction, there is still room for improvement. In addition, the projection of 3D motion into 2D images loses spatial information and affects the performance."
+ }
+ ]
+ }
+ ],
+ "index": 37
+ },
+ {
+ "bbox": [
+ 304,
+ 606,
+ 545,
+ 676
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 304,
+ 606,
+ 545,
+ 676
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 304,
+ 606,
+ 545,
+ 676
+ ],
+ "type": "text",
+ "content": "Future work. Compared with 2D vision-language models, 3D vision-language models have the advantage of capturing spatial relationships directly. Therefore, fine-tuning 3D vision-language models to make them more suitable for the task of motion semantics extraction is worth exploring in our future work."
+ }
+ ]
+ }
+ ],
+ "index": 38
+ },
+ {
+ "bbox": [
+ 304,
+ 677,
+ 545,
+ 712
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 304,
+ 677,
+ 545,
+ 712
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 304,
+ 677,
+ 545,
+ 712
+ ],
+ "type": "text",
+ "content": "Acknowledgements. This work was supported by the National Nature Science Foundation of China under Grant 62173293."
+ }
+ ]
+ }
+ ],
+ "index": 39
+ }
+ ],
+ "discarded_blocks": [
+ {
+ "bbox": [
+ 294,
+ 748,
+ 315,
+ 757
+ ],
+ "type": "page_number",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 294,
+ 748,
+ 315,
+ 757
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 294,
+ 748,
+ 315,
+ 757
+ ],
+ "type": "text",
+ "content": "2162"
+ }
+ ]
+ }
+ ],
+ "index": 40
+ }
+ ],
+ "page_size": [
+ 612,
+ 792
+ ],
+ "page_idx": 7
+ },
+ {
+ "para_blocks": [
+ {
+ "bbox": [
+ 48,
+ 71,
+ 106,
+ 83
+ ],
+ "type": "title",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 48,
+ 71,
+ 106,
+ 83
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 48,
+ 71,
+ 106,
+ 83
+ ],
+ "type": "text",
+ "content": "References"
+ }
+ ]
+ }
+ ],
+ "index": 0
+ },
+ {
+ "bbox": [
+ 48,
+ 91,
+ 288,
+ 714
+ ],
+ "type": "list",
+ "angle": 0,
+ "index": 15,
+ "blocks": [
+ {
+ "bbox": [
+ 53,
+ 91,
+ 287,
+ 112
+ ],
+ "type": "ref_text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 53,
+ 91,
+ 287,
+ 112
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 53,
+ 91,
+ 287,
+ 112
+ ],
+ "type": "text",
+ "content": "[1] Adobe's mixamo. https://www.mixamo.com/. Accessed: 2023-02-08."
+ }
+ ]
+ }
+ ],
+ "index": 1
+ },
+ {
+ "bbox": [
+ 53,
+ 114,
+ 287,
+ 158
+ ],
+ "type": "ref_text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 53,
+ 114,
+ 287,
+ 158
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 53,
+ 114,
+ 287,
+ 158
+ ],
+ "type": "text",
+ "content": "[2] Kfir Aberman, Peizhuo Li, Dani Lischinski, Olga Sorkine-Hornung, Daniel Cohen-Or, and Baoquan Chen. Skeleton-aware networks for deep motion retargeting. ACM Transactions on Graphics (TOG), 39(4):62-1, 2020."
+ }
+ ]
+ }
+ ],
+ "index": 2
+ },
+ {
+ "bbox": [
+ 53,
+ 159,
+ 288,
+ 204
+ ],
+ "type": "ref_text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 53,
+ 159,
+ 288,
+ 204
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 53,
+ 159,
+ 288,
+ 204
+ ],
+ "type": "text",
+ "content": "[3] Daichi Azuma, Taiki Miyanishi, Shuhei Kurita, and Motoaki Kawanabe. Scanqa: 3d question answering for spatial scene understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022."
+ }
+ ]
+ }
+ ],
+ "index": 3
+ },
+ {
+ "bbox": [
+ 53,
+ 205,
+ 287,
+ 237
+ ],
+ "type": "ref_text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 53,
+ 205,
+ 287,
+ 237
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 53,
+ 205,
+ 287,
+ 237
+ ],
+ "type": "text",
+ "content": "[4] Kwang-Jin Choi and Hyeong-Seok Ko. Online motion retargeting. The Journal of Visualization and Computer Animation, 11(5):223-235, 2000."
+ }
+ ]
+ }
+ ],
+ "index": 4
+ },
+ {
+ "bbox": [
+ 53,
+ 239,
+ 288,
+ 294
+ ],
+ "type": "ref_text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 53,
+ 239,
+ 288,
+ 294
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 53,
+ 239,
+ 288,
+ 294
+ ],
+ "type": "text",
+ "content": "[5] Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Yunxuan Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, et al. Scaling instruction-finetuned language models. arXiv preprint arXiv:2210.11416, 2022."
+ }
+ ]
+ }
+ ],
+ "index": 5
+ },
+ {
+ "bbox": [
+ 53,
+ 296,
+ 288,
+ 329
+ ],
+ "type": "ref_text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 53,
+ 296,
+ 288,
+ 329
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 53,
+ 296,
+ 288,
+ 329
+ ],
+ "type": "text",
+ "content": "[6] Michael Gleicher. Retargetting motion to new characters. In Proceedings of the 25th annual conference on Computer graphics and interactive techniques, pages 33-42, 1998."
+ }
+ ]
+ }
+ ],
+ "index": 6
+ },
+ {
+ "bbox": [
+ 53,
+ 331,
+ 287,
+ 385
+ ],
+ "type": "ref_text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 53,
+ 331,
+ 287,
+ 385
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 53,
+ 331,
+ 287,
+ 385
+ ],
+ "type": "text",
+ "content": "[7] Chuan Guo, Xinxin Zuo, Sen Wang, Shihao Zou, Qingyao Sun, Annan Deng, Minglun Gong, and Li Cheng. Action2motion: Conditioned generation of 3d human motions. In Proceedings of the 28th ACM International Conference on Multimedia, pages 2021-2029, 2020."
+ }
+ ]
+ }
+ ],
+ "index": 7
+ },
+ {
+ "bbox": [
+ 53,
+ 387,
+ 287,
+ 441
+ ],
+ "type": "ref_text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 53,
+ 387,
+ 287,
+ 441
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 53,
+ 387,
+ 287,
+ 441
+ ],
+ "type": "text",
+ "content": "[8] Chuan Guo, Shihao Zou, Xinxin Zuo, Sen Wang, Wei Ji, Xingyu Li, and Li Cheng. Generating diverse and natural 3d human motions from text. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5152-5161, 2022."
+ }
+ ]
+ }
+ ],
+ "index": 8
+ },
+ {
+ "bbox": [
+ 53,
+ 443,
+ 287,
+ 487
+ ],
+ "type": "ref_text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 53,
+ 443,
+ 287,
+ 487
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 53,
+ 443,
+ 287,
+ 487
+ ],
+ "type": "text",
+ "content": "[9] Lei Hu, Zihao Zhang, Chongyang Zhong, Boyuan Jiang, and Shihong Xia. Pose-aware attention network for flexible motion retargeting by body part. IEEE Transactions on Visualization and Computer Graphics, pages 1-17, 2023."
+ }
+ ]
+ }
+ ],
+ "index": 9
+ },
+ {
+ "bbox": [
+ 48,
+ 488,
+ 287,
+ 543
+ ],
+ "type": "ref_text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 48,
+ 488,
+ 287,
+ 543
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 48,
+ 488,
+ 287,
+ 543
+ ],
+ "type": "text",
+ "content": "[10] Catalin Ionescu, Dragos Papava, Vlad Olaru, and Cristian Sminchisescu. Human3.6m: Large scale datasets and predictive methods for 3d human sensing in natural environments. IEEE Transactions on Pattern Analysis and Machine Intelligence, 36(7):1325-1339, 2014."
+ }
+ ]
+ }
+ ],
+ "index": 10
+ },
+ {
+ "bbox": [
+ 48,
+ 544,
+ 287,
+ 588
+ ],
+ "type": "ref_text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 48,
+ 544,
+ 287,
+ 588
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 48,
+ 544,
+ 287,
+ 588
+ ],
+ "type": "text",
+ "content": "[11] Jehee Lee and Sung Yong Shin. A hierarchical approach to interactive motion editing for human-like figures. In Proceedings of the 26th annual conference on Computer graphics and interactive techniques, pages 39-48, 1999."
+ }
+ ]
+ }
+ ],
+ "index": 11
+ },
+ {
+ "bbox": [
+ 48,
+ 590,
+ 287,
+ 644
+ ],
+ "type": "ref_text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 48,
+ 590,
+ 287,
+ 644
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 48,
+ 590,
+ 287,
+ 644
+ ],
+ "type": "text",
+ "content": "[12] John P Lewis, Matt Cordner, and Nickson Fong. Pose space deformation: a unified approach to shape interpolation and skeleton-driven deformation. In Proceedings of the 27th annual conference on Computer graphics and interactive techniques, pages 165-172, 2000."
+ }
+ ]
+ }
+ ],
+ "index": 12
+ },
+ {
+ "bbox": [
+ 48,
+ 647,
+ 287,
+ 689
+ ],
+ "type": "ref_text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 48,
+ 647,
+ 287,
+ 689
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 48,
+ 647,
+ 287,
+ 689
+ ],
+ "type": "text",
+ "content": "[13] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. arXiv preprint arXiv:2301.12597, 2023."
+ }
+ ]
+ }
+ ],
+ "index": 13
+ },
+ {
+ "bbox": [
+ 48,
+ 691,
+ 287,
+ 714
+ ],
+ "type": "ref_text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 48,
+ 691,
+ 287,
+ 714
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 48,
+ 691,
+ 287,
+ 714
+ ],
+ "type": "text",
+ "content": "[14] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. BLIP-2: bootstrapping language-image pre-training with"
+ }
+ ]
+ }
+ ],
+ "index": 14
+ }
+ ],
+ "sub_type": "ref_text"
+ },
+ {
+ "bbox": [
+ 308,
+ 73,
+ 545,
+ 714
+ ],
+ "type": "list",
+ "angle": 0,
+ "index": 30,
+ "blocks": [
+ {
+ "bbox": [
+ 326,
+ 73,
+ 545,
+ 95
+ ],
+ "type": "ref_text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 326,
+ 73,
+ 545,
+ 95
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 326,
+ 73,
+ 545,
+ 95
+ ],
+ "type": "text",
+ "content": "frozen image encoders and large language models. In ICML, 2023."
+ }
+ ]
+ }
+ ],
+ "index": 16
+ },
+ {
+ "bbox": [
+ 308,
+ 96,
+ 545,
+ 129
+ ],
+ "type": "ref_text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 308,
+ 96,
+ 545,
+ 129
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 308,
+ 96,
+ 545,
+ 129
+ ],
+ "type": "text",
+ "content": "[15] Jongin Lim, Hyung Jin Chang, and Jin Young Choi. Pmnet: Learning of disentangled pose and movement for unsupervised motion retargeting. In BMVC, page 7, 2019."
+ }
+ ]
+ }
+ ],
+ "index": 17
+ },
+ {
+ "bbox": [
+ 308,
+ 130,
+ 545,
+ 175
+ ],
+ "type": "ref_text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 308,
+ 130,
+ 545,
+ 175
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 308,
+ 130,
+ 545,
+ 175
+ ],
+ "type": "text",
+ "content": "[16] Shichen Liu, Tianye Li, Weikai Chen, and Hao Li. Soft rasterizer: A differentiable renderer for image-based 3d reasoning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7708-7717, 2019."
+ }
+ ]
+ }
+ ],
+ "index": 18
+ },
+ {
+ "bbox": [
+ 308,
+ 175,
+ 545,
+ 219
+ ],
+ "type": "ref_text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 308,
+ 175,
+ 545,
+ 219
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 308,
+ 175,
+ 545,
+ 219
+ ],
+ "type": "text",
+ "content": "[17] Gyeongsik Moon, Hongsuk Choi, and Kyoung Mu Lee. Accurate 3d hand pose estimation for whole-body 3d human mesh estimation. In Computer Vision and Pattern Recognition Workshop (CVPRW), 2022."
+ }
+ ]
+ }
+ ],
+ "index": 19
+ },
+ {
+ "bbox": [
+ 308,
+ 220,
+ 545,
+ 263
+ ],
+ "type": "ref_text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 308,
+ 220,
+ 545,
+ 263
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 308,
+ 220,
+ 545,
+ 263
+ ],
+ "type": "text",
+ "content": "[18] Zoran Popović and Andrew Witkin. Physically based motion transformation. In Proceedings of the 26th annual conference on Computer graphics and interactive techniques, pages 11-20, 1999."
+ }
+ ]
+ }
+ ],
+ "index": 20
+ },
+ {
+ "bbox": [
+ 308,
+ 265,
+ 545,
+ 331
+ ],
+ "type": "ref_text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 308,
+ 265,
+ 545,
+ 331
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 308,
+ 265,
+ 545,
+ 331
+ ],
+ "type": "text",
+ "content": "[19] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021."
+ }
+ ]
+ }
+ ],
+ "index": 21
+ },
+ {
+ "bbox": [
+ 308,
+ 332,
+ 545,
+ 376
+ ],
+ "type": "ref_text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 308,
+ 332,
+ 545,
+ 376
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 308,
+ 332,
+ 545,
+ 376
+ ],
+ "type": "text",
+ "content": "[20] Guy Tevet, Brian Gordon, Amir Hertz, Amit H Bermano, and Daniel Cohen-Or. Motionclip: Exposing human motion generation to clip space. In European Conference on Computer Vision, pages 358–374. Springer, 2022."
+ }
+ ]
+ }
+ ],
+ "index": 22
+ },
+ {
+ "bbox": [
+ 308,
+ 377,
+ 545,
+ 410
+ ],
+ "type": "ref_text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 308,
+ 377,
+ 545,
+ 410
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 308,
+ 377,
+ 545,
+ 410
+ ],
+ "type": "text",
+ "content": "[21] Guy Tevet, Sigal Raab, Brian Gordon, Yonatan Shafir, Daniel Cohen-Or, and Amit H Bermano. Human motion diffusion model. arXiv preprint arXiv:2209.14916, 2022."
+ }
+ ]
+ }
+ ],
+ "index": 23
+ },
+ {
+ "bbox": [
+ 308,
+ 411,
+ 545,
+ 464
+ ],
+ "type": "ref_text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 308,
+ 411,
+ 545,
+ 464
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 308,
+ 411,
+ 545,
+ 464
+ ],
+ "type": "text",
+ "content": "[22] Ruben Villegas, Jimei Yang, Duygu Ceylan, and Honglak Lee. Neural kinematic networks for unsupervised motion retargeting. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 8639-8648, 2018."
+ }
+ ]
+ }
+ ],
+ "index": 24
+ },
+ {
+ "bbox": [
+ 308,
+ 468,
+ 545,
+ 511
+ ],
+ "type": "ref_text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 308,
+ 468,
+ 545,
+ 511
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 308,
+ 468,
+ 545,
+ 511
+ ],
+ "type": "text",
+ "content": "[23] Ruben Villegas, Duygu Ceylan, Aaron Hertzmann, Jimei Yang, and Jun Saito. Contact-aware retargeting of skinned motion. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9720-9729, 2021."
+ }
+ ]
+ }
+ ],
+ "index": 25
+ },
+ {
+ "bbox": [
+ 308,
+ 513,
+ 545,
+ 545
+ ],
+ "type": "ref_text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 308,
+ 513,
+ 545,
+ 545
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 308,
+ 513,
+ 545,
+ 545
+ ],
+ "type": "text",
+ "content": "[24] Zhengyuan Yang, Songyang Zhang, Liwei Wang, and Jiebo Luo. Sat: 2d semantics assisted training for 3d visual grounding. In ICCV, 2021."
+ }
+ ]
+ }
+ ],
+ "index": 26
+ },
+ {
+ "bbox": [
+ 308,
+ 547,
+ 545,
+ 612
+ ],
+ "type": "ref_text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 308,
+ 547,
+ 545,
+ 612
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 308,
+ 547,
+ 545,
+ 612
+ ],
+ "type": "text",
+ "content": "[25] Jiaxu Zhang, Junwu Weng, Di Kang, Fang Zhao, Shaoli Huang, Xuefei Zhe, Linchao Bao, Ying Shan, Jue Wang, and Zhigang Tu. Skinned motion retargeting with residual perception of motion semantics & geometry. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13864-13872, 2023."
+ }
+ ]
+ }
+ ],
+ "index": 27
+ },
+ {
+ "bbox": [
+ 308,
+ 613,
+ 545,
+ 667
+ ],
+ "type": "ref_text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 308,
+ 613,
+ 545,
+ 667
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 308,
+ 613,
+ 545,
+ 667
+ ],
+ "type": "text",
+ "content": "[26] Yi Zhou, Connelly Barnes, Jingwan Lu, Jimei Yang, and Hao Li. On the continuity of rotation representations in neural networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5745-5753, 2019."
+ }
+ ]
+ }
+ ],
+ "index": 28
+ },
+ {
+ "bbox": [
+ 308,
+ 670,
+ 545,
+ 714
+ ],
+ "type": "ref_text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 308,
+ 670,
+ 545,
+ 714
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 308,
+ 670,
+ 545,
+ 714
+ ],
+ "type": "text",
+ "content": "[27] Deyao Zhu, Jun Chen, Xiaoqian Shen, Xiang Li, and Mohamed Elhoseiny. Minigpt-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592, 2023."
+ }
+ ]
+ }
+ ],
+ "index": 29
+ }
+ ],
+ "sub_type": "ref_text"
+ }
+ ],
+ "discarded_blocks": [
+ {
+ "bbox": [
+ 295,
+ 748,
+ 315,
+ 757
+ ],
+ "type": "page_number",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 295,
+ 748,
+ 315,
+ 757
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 295,
+ 748,
+ 315,
+ 757
+ ],
+ "type": "text",
+ "content": "2163"
+ }
+ ]
+ }
+ ],
+ "index": 31
+ }
+ ],
+ "page_size": [
+ 612,
+ 792
+ ],
+ "page_idx": 8
+ },
+ {
+ "para_blocks": [
+ {
+ "bbox": [
+ 48,
+ 72,
+ 288,
+ 107
+ ],
+ "type": "ref_text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 48,
+ 72,
+ 288,
+ 107
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 48,
+ 72,
+ 288,
+ 107
+ ],
+ "type": "text",
+ "content": "[28] Ziyu Zhu, Xiaojian Ma, Yixin Chen, Zhidong Deng, Siyuan Huang, and Qing Li. 3d-vista: Pre-trained transformer for 3d vision and text alignment. ICCV, 2023."
+ }
+ ]
+ }
+ ],
+ "index": 0
+ }
+ ],
+ "discarded_blocks": [
+ {
+ "bbox": [
+ 295,
+ 749,
+ 316,
+ 757
+ ],
+ "type": "page_number",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 295,
+ 749,
+ 316,
+ 757
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 295,
+ 749,
+ 316,
+ 757
+ ],
+ "type": "text",
+ "content": "2164"
+ }
+ ]
+ }
+ ],
+ "index": 1
+ }
+ ],
+ "page_size": [
+ 612,
+ 792
+ ],
+ "page_idx": 9
+ }
+ ],
+ "_backend": "vlm",
+ "_version_name": "2.6.4"
+}
\ No newline at end of file