data
dict
{ "proceeding": { "id": "1H1gVMlkl32", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1H0NDIYvq0g", "doi": "10.1109/CVPR52688.2022.00552", "title": "SelfRecon: Self Reconstruction Your Digital Avatar from Monocular Video", "normalizedTitle": "SelfRecon: Self Reconstruction Your Digital Avatar from Monocular Video", "abstract": "We propose SelfRecon, a clothed human body reconstruction method that combines implicit and explicit repre-sentations to recover space-time coherent geometries from a monocular self-rotating human video. Explicit methods require a predefined template mesh for a given sequence, while the template is hard to acquire for a specific subject. Meanwhile, the fixed topology limits the reconstruction accuracy and clothing types. Implicit representation supports arbitrary topology and can represent high-fidelity geometry shapes due to its continuous nature. However, it is difficult to integrate multi-frame information to produce a consistent registration sequence for downstream applications. We propose to combine the advantages of both representations. We utilize differential mask loss of the explicit mesh to obtain the coherent overall shape, while the details on the implicit surface are refined with the differentiable neural rendering. Meanwhile, the explicit mesh is updated periodically to adjust its topology changes, and a consistency loss is designed to match both representations. Compared with existing methods, SelfRecon can produce high-fidelity surfaces for arbitrary clothed humans with self-supervised optimization. Extensive experimental results demonstrate its effectiveness on real captured monocular videos. 
The source code is available at https://github.com/jby1993/SelfReconCode.", "abstracts": [ { "abstractType": "Regular", "content": "We propose SelfRecon, a clothed human body reconstruction method that combines implicit and explicit repre-sentations to recover space-time coherent geometries from a monocular self-rotating human video. Explicit methods require a predefined template mesh for a given sequence, while the template is hard to acquire for a specific subject. Meanwhile, the fixed topology limits the reconstruction accuracy and clothing types. Implicit representation supports arbitrary topology and can represent high-fidelity geometry shapes due to its continuous nature. However, it is difficult to integrate multi-frame information to produce a consistent registration sequence for downstream applications. We propose to combine the advantages of both representations. We utilize differential mask loss of the explicit mesh to obtain the coherent overall shape, while the details on the implicit surface are refined with the differentiable neural rendering. Meanwhile, the explicit mesh is updated periodically to adjust its topology changes, and a consistency loss is designed to match both representations. Compared with existing methods, SelfRecon can produce high-fidelity surfaces for arbitrary clothed humans with self-supervised optimization. Extensive experimental results demonstrate its effectiveness on real captured monocular videos. The source code is available at https://github.com/jby1993/SelfReconCode.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose SelfRecon, a clothed human body reconstruction method that combines implicit and explicit repre-sentations to recover space-time coherent geometries from a monocular self-rotating human video. Explicit methods require a predefined template mesh for a given sequence, while the template is hard to acquire for a specific subject. 
Meanwhile, the fixed topology limits the reconstruction accuracy and clothing types. Implicit representation supports arbitrary topology and can represent high-fidelity geometry shapes due to its continuous nature. However, it is difficult to integrate multi-frame information to produce a consistent registration sequence for downstream applications. We propose to combine the advantages of both representations. We utilize differential mask loss of the explicit mesh to obtain the coherent overall shape, while the details on the implicit surface are refined with the differentiable neural rendering. Meanwhile, the explicit mesh is updated periodically to adjust its topology changes, and a consistency loss is designed to match both representations. Compared with existing methods, SelfRecon can produce high-fidelity surfaces for arbitrary clothed humans with self-supervised optimization. Extensive experimental results demonstrate its effectiveness on real captured monocular videos. The source code is available at https://github.com/jby1993/SelfReconCode.", "fno": "694600f595", "keywords": [ "Avatars", "Clothing", "Computational Geometry", "Geometry", "Image Motion Analysis", "Image Reconstruction", "Image Registration", "Image Representation", "Image Sequences", "Optimisation", "Video Signal Processing", "Self Recon", "Digital Avatar", "Monocular Video", "Clothed Human Body Reconstruction Method", "Implicit Representations", "Space Time Coherent Geometries", "Self Rotating Human Video", "Explicit Methods", "Predefined Template Mesh", "Fixed Topology", "Reconstruction Accuracy", "Clothing Types", "Implicit Representation", "Arbitrary Topology", "High Fidelity Geometry Shapes", "Multiframe Information", "Consistent Registration Sequence", "Downstream Applications", "Differential Mask Loss", "Differentiable Neural Rendering", "Consistency Loss", "High Fidelity Surfaces", "Arbitrary Clothed Humans", "Monocular Videos", "Explicit Representations", "Geometry", "Surface 
Reconstruction", "Computer Vision", "Codes", "Shape", "Reconstruction Algorithms", "Rendering Computer Graphics" ], "authors": [ { "affiliation": "University of Science and Technology of China", "fullName": "Boyi Jiang", "givenName": "Boyi", "surname": "Jiang", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Science and Technology of China", "fullName": "Yang Hong", "givenName": "Yang", "surname": "Hong", "__typename": "ArticleAuthorType" }, { "affiliation": "Zhejiang University", "fullName": "Hujun Bao", "givenName": "Hujun", "surname": "Bao", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Science and Technology of China", "fullName": "Juyong Zhang", "givenName": "Juyong", "surname": "Zhang", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-06-01T00:00:00", "pubType": "proceedings", "pages": "5595-5605", "year": "2022", "issn": null, "isbn": "978-1-6654-6946-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "694600f585", "articleId": "1H0NfF1YXw4", "__typename": "AdjacentArticleType" }, "next": { "fno": "694600f606", "articleId": "1H1kx76ZAn6", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "trans/tp/2006/02/i0328", "title": "Implicit Meshes for Surface Reconstruction", "doi": null, "abstractUrl": "/journal/tp/2006/02/i0328/13rRUygT7o3", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200g505", "title": "Learning Signed Distance Field for Multi-view Surface Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200g505/1BmFLjuiAKs", "parentPublication": { 
"id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200k0734", "title": "Dynamic Surface Function Networks for Clothed Human Bodies", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200k0734/1BmFU8kwG2s", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600d835", "title": "Registering Explicit to Implicit: Towards High-Fidelity Garment mesh Reconstruction from Single Images", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600d835/1H0Lx9QOs9i", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600g270", "title": "Gradient-SDF: A Semi-Implicit Surface Representation for 3D Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600g270/1H0MXW1GTN6", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600e319", "title": "Recovering Fine Details for Neural Implicit Surface Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600e319/1KxUSVbk6He", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/cvpr/2020/7168/0/716800d090", "title": "ARCH: Animatable Reconstruction of Clothed Humans", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800d090/1m3nz4mKHzG", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/11/09555381", "title": "SurRF: Unsupervised Multi-View Stereopsis by Learning Surface Radiance Field", "doi": null, "abstractUrl": "/journal/tp/2022/11/09555381/1xjQQdQGABG", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2021/4899/0/489900b780", "title": "Temporal Consistency Loss for High Resolution Textured and Clothed 3D Human Reconstruction from Monocular Video", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2021/489900b780/1yJYeTNJp1m", "parentPublication": { "id": "proceedings/cvprw/2021/4899/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900i645", "title": "Dynamic Neural Radiance Fields for Monocular 4D Facial Avatar Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900i645/1yeHVNYk40M", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1rHeKX6WcSc", "title": "2020 5th International Conference on Information Science, Computer Technology and Transportation (ISCTT)", "acronym": "isctt", "groupId": "1840584", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1rHeLb3yn4I", "doi": "10.1109/ISCTT51595.2020.00035", "title": "Monocular Instance Level 3D Object Reconstruction based on Mesh R-CNN", "normalizedTitle": "Monocular Instance Level 3D Object Reconstruction based on Mesh R-CNN", "abstract": "In recent years we have witnessed the rapid improvement of algorithms and technologies in object detection, instance segmentation and 3d reconstruction. Since the development of the R-CNN model and various improvements that follows, it is now an easy task to separate objects from the environment. For 3d reconstruction, Mesh R-CNN and PiFUHD render objects close to their original geometry, and this leads us to develop a 3d object reconstruction system that can integrate and improve its performance based on two models. We find that Mesh R-CNN can be improved with the newest PointRend model that generates more accurate shapes than Mask R-CNN on which Mesh R-CNN is based, and we reach the conclusion that a Monocular Instance Level 3D Object Reconstruction is fully feasible. document is a “live” template. The various components of your paper [title, text, heads, etc.] are already defined on the style sheet, as illustrated by the portions given in this document.", "abstracts": [ { "abstractType": "Regular", "content": "In recent years we have witnessed the rapid improvement of algorithms and technologies in object detection, instance segmentation and 3d reconstruction. Since the development of the R-CNN model and various improvements that follows, it is now an easy task to separate objects from the environment. 
For 3d reconstruction, Mesh R-CNN and PiFUHD render objects close to their original geometry, and this leads us to develop a 3d object reconstruction system that can integrate and improve its performance based on two models. We find that Mesh R-CNN can be improved with the newest PointRend model that generates more accurate shapes than Mask R-CNN on which Mesh R-CNN is based, and we reach the conclusion that a Monocular Instance Level 3D Object Reconstruction is fully feasible. document is a “live” template. The various components of your paper [title, text, heads, etc.] are already defined on the style sheet, as illustrated by the portions given in this document.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In recent years we have witnessed the rapid improvement of algorithms and technologies in object detection, instance segmentation and 3d reconstruction. Since the development of the R-CNN model and various improvements that follows, it is now an easy task to separate objects from the environment. For 3d reconstruction, Mesh R-CNN and PiFUHD render objects close to their original geometry, and this leads us to develop a 3d object reconstruction system that can integrate and improve its performance based on two models. We find that Mesh R-CNN can be improved with the newest PointRend model that generates more accurate shapes than Mask R-CNN on which Mesh R-CNN is based, and we reach the conclusion that a Monocular Instance Level 3D Object Reconstruction is fully feasible. document is a “live” template. The various components of your paper [title, text, heads, etc.] 
are already defined on the style sheet, as illustrated by the portions given in this document.", "fno": "857500a158", "keywords": [ "Convolutional Neural Nets", "Image Reconstruction", "Image Segmentation", "Object Detection", "Rendering Computer Graphics", "Solid Modelling", "Mesh R CNN", "Pi FUHD Render Objects", "Mask R CNN", "Object Detection", "Monocular Instance Level 3 D Object Reconstruction", "Point Rend Model", "Instance Segmentation", "Solid Modeling", "Three Dimensional Displays", "Shape", "Transportation", "Object Detection", "Task Analysis", "Image Reconstruction", "Pattern Recognition", "Instance Segmentation", "Feature Extraction", "3 D Reconstruction" ], "authors": [ { "affiliation": "Shanghai Foreign Language School Affiliated to SISU,Shanghai,China", "fullName": "Yuyang Wu", "givenName": "Yuyang", "surname": "Wu", "__typename": "ArticleAuthorType" } ], "idPrefix": "isctt", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-11-01T00:00:00", "pubType": "proceedings", "pages": "1-6", "year": "2020", "issn": null, "isbn": "978-1-7281-8575-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "857500a153", "articleId": "1rHeNYVZW6Y", "__typename": "AdjacentArticleType" }, "next": { "fno": "857500a164", "articleId": "1rHeOFtQfSw", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2017/1032/0/1032c980", "title": "Mask R-CNN", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032c980/12OmNrIJqCA", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2017/1034/0/1034a912", "title": "3D Scene Mesh from CNN Depth Predictions and Sparse Monocular SLAM", 
"doi": null, "abstractUrl": "/proceedings-article/iccvw/2017/1034a912/12OmNvD8RuE", "parentPublication": { "id": "proceedings/iccvw/2017/1034/0", "title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391b440", "title": "Fast R-CNN", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391b440/12OmNx0RIMd", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2016/1437/0/1437a394", "title": "Faster R-CNN Features for Instance Search", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2016/1437a394/12OmNxFJXDZ", "parentPublication": { "id": "proceedings/cvprw/2016/1437/0", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/bd/2023/02/09811396", "title": "IEMask R-CNN: Information-Enhanced Mask R-CNN", "doi": null, "abstractUrl": "/journal/bd/2023/02/09811396/1ECXz8Eaqli", "parentPublication": { "id": "trans/bd", "title": "IEEE Transactions on Big Data", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300j784", "title": "Mesh R-CNN", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300j784/1hVlAhHbyIo", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090582", "title": "Recurrent R-CNN: Online Instance Mapping with context correlation", "doi": null, "abstractUrl": 
"/proceedings-article/vrw/2020/09090582/1jIxy6xVCAE", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800k0545", "title": "Disp R-CNN: Stereo 3D Object Detection via Shape Prior Guided Instance Disparity Estimation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800k0545/1m3nkDPTta8", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/09/09419782", "title": "Shape Prior Guided Instance Disparity Estimation for 3D Object Detection", "doi": null, "abstractUrl": "/journal/tp/2022/09/09419782/1tcfD8NXdKM", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900f998", "title": "Self-Supervised 3D Mesh Reconstruction from Single Images", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900f998/1yeMjdyhMTS", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1uiluGq0Oo8", "title": "2021 IEEE International Conference on Multimedia and Expo (ICME)", "acronym": "icme", "groupId": "1000477", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1uimbE1R1Ze", "doi": "10.1109/ICME51207.2021.9428299", "title": "Capturing Implicit Spatial Cues for Monocular 3d Hand Reconstruction", "normalizedTitle": "Capturing Implicit Spatial Cues for Monocular 3d Hand Reconstruction", "abstract": "With the development of the parameterized hand model (e.g. MANO), it is possible to reconstruct the 3D hand mesh from a single 2D hand image by learning a few hand model parameters, rather than estimating hundreds of vertices on the mesh. However, it is highly non-linear to learn these parameters from the 2D hand image, as there is no explicit spatial correspondence between these parameters and image pixels. In this paper, we successfully leverage the graph convolutional network (GCN) to capture implicit spatial cues for fitting the well-known MANO hand model, thus greatly improving the performance of monocular 3D hand reconstruction. Our proposed MANO-GCN establishes the spatial hand mesh and hand joints graph for learning MANO parameters, with node features propagated along edges to utilize the spatial information. Among all monocular 3D hand reconstruction methods with MANO hand model, MANO-GCN achieves state-of-the-art accuracy on public FreiHAND and HO-3D benchmarks, without any bells and whistles. Code is available at https://github.com/ChenJoya/manogcn.", "abstracts": [ { "abstractType": "Regular", "content": "With the development of the parameterized hand model (e.g. MANO), it is possible to reconstruct the 3D hand mesh from a single 2D hand image by learning a few hand model parameters, rather than estimating hundreds of vertices on the mesh. 
However, it is highly non-linear to learn these parameters from the 2D hand image, as there is no explicit spatial correspondence between these parameters and image pixels. In this paper, we successfully leverage the graph convolutional network (GCN) to capture implicit spatial cues for fitting the well-known MANO hand model, thus greatly improving the performance of monocular 3D hand reconstruction. Our proposed MANO-GCN establishes the spatial hand mesh and hand joints graph for learning MANO parameters, with node features propagated along edges to utilize the spatial information. Among all monocular 3D hand reconstruction methods with MANO hand model, MANO-GCN achieves state-of-the-art accuracy on public FreiHAND and HO-3D benchmarks, without any bells and whistles. Code is available at https://github.com/ChenJoya/manogcn.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "With the development of the parameterized hand model (e.g. MANO), it is possible to reconstruct the 3D hand mesh from a single 2D hand image by learning a few hand model parameters, rather than estimating hundreds of vertices on the mesh. However, it is highly non-linear to learn these parameters from the 2D hand image, as there is no explicit spatial correspondence between these parameters and image pixels. In this paper, we successfully leverage the graph convolutional network (GCN) to capture implicit spatial cues for fitting the well-known MANO hand model, thus greatly improving the performance of monocular 3D hand reconstruction. Our proposed MANO-GCN establishes the spatial hand mesh and hand joints graph for learning MANO parameters, with node features propagated along edges to utilize the spatial information. Among all monocular 3D hand reconstruction methods with MANO hand model, MANO-GCN achieves state-of-the-art accuracy on public FreiHAND and HO-3D benchmarks, without any bells and whistles. 
Code is available at https://github.com/ChenJoya/manogcn.", "fno": "09428299", "keywords": [ "Feature Extraction", "Graph Theory", "Image Reconstruction", "Learning Artificial Intelligence", "Pose Estimation", "MANO Hand Model", "Spatial Hand Mesh", "Hand Joints Graph", "MANO Parameters", "Spatial Information", "Hand Reconstruction Methods", "MANO GCN Achieves State Of The Art Accuracy", "Implicit Spatial Cues", "Monocular 3", "Parameterized Hand Model", "E G MANO", "3 D Hand Mesh", "Single 2 D Hand Image", "Hand Model Parameters", "Explicit Spatial Correspondence", "Image Pixels", "Convolutional Codes", "Solid Modeling", "Three Dimensional Displays", "Conferences", "Fitting", "Reconstruction Algorithms", "Benchmark Testing", "Monocular 3 D Hand Reconstruction", "Parameterized Hand Model", "Graph Convolutional Network" ], "authors": [ { "affiliation": "Chinese Academy of Sciences,Hefei Institutes of Physical Science", "fullName": "Qi Wu", "givenName": "Qi", "surname": "Wu", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Science and Technology of China", "fullName": "Joya Chen", "givenName": "Joya", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": "Chinese Academy of Sciences,Hefei Institutes of Physical Science", "fullName": "Xu Zhou", "givenName": "Xu", "surname": "Zhou", "__typename": "ArticleAuthorType" }, { "affiliation": "Chinese Academy of Sciences,Hefei Institutes of Physical Science", "fullName": "Zhiming Yao", "givenName": "Zhiming", "surname": "Yao", "__typename": "ArticleAuthorType" }, { "affiliation": "Chinese Academy of Sciences,Hefei Institutes of Physical Science", "fullName": "Xianjun Yang", "givenName": "Xianjun", "surname": "Yang", "__typename": "ArticleAuthorType" } ], "idPrefix": "icme", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-07-01T00:00:00", "pubType": "proceedings", "pages": "1-6", "year": "2021", "issn": null, "isbn": 
"978-1-6654-3864-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09428246", "articleId": "1uimeerujtK", "__typename": "AdjacentArticleType" }, "next": { "fno": "09428172", "articleId": "1uilNnr2T6M", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2021/2812/0/281200m2397", "title": "Reconstructing Hand-Object Interactions in the Wild", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200m2397/1BmEAsHFiMM", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200l1457", "title": "Modulated Graph Convolutional Network for 3D Human Pose Estimation", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200l1457/1BmH6KAmBaM", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600c751", "title": "Interacting Attention Graph for Single Image Two-Hand Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600c751/1H1jycJB636", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600u0501", "title": "LISA: Learning Implicit Shape and Appearance of Hands", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600u0501/1H1kQC1azuM", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer 
Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2023/4544/0/10042643", "title": "STr-GCN: Dual Spatial Graph Convolutional Network and Transformer Graph Encoder for 3D Hand Gesture Recognition", "doi": null, "abstractUrl": "/proceedings-article/fg/2023/10042643/1KOv2eD07Kg", "parentPublication": { "id": "proceedings/fg/2023/4544/0", "title": "2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdmw/2019/4896/0/489600a646", "title": "Dynamic Spatio-Temporal Feature Learning via Graph Convolution in 3D Convolutional Networks", "doi": null, "abstractUrl": "/proceedings-article/icdmw/2019/489600a646/1gAwXPaWdeU", "parentPublication": { "id": "proceedings/icdmw/2019/4896/0", "title": "2019 International Conference on Data Mining Workshops (ICDMW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300c354", "title": "End-to-End Hand Mesh Recovery From a Monocular RGB Image", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300c354/1hVlILCLZPG", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2021/0191/0/019100c266", "title": "SCAT: Stride Consistency with Auto-regressive regressor and Transformer for hand pose estimation", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2021/019100c266/1yNhriirGcE", "parentPublication": { "id": "proceedings/iccvw/2021/0191/0", "title": "2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/ismar/2021/0158/0/015800a099", "title": "SAR: Spatial-Aware Regression for 3D Hand Pose and Mesh Reconstruction from a Monocular RGB Image", "doi": null, "abstractUrl": "/proceedings-article/ismar/2021/015800a099/1yeD3Eaz8By", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2021/2688/0/268800a432", "title": "Monocular 3D Reconstruction of Interacting Hands via Collision-Aware Factorized Refinements", "doi": null, "abstractUrl": "/proceedings-article/3dv/2021/268800a432/1zWEphTfS4o", "parentPublication": { "id": "proceedings/3dv/2021/2688/0", "title": "2021 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1yeHGyRsuys", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1yeHRUzCkF2", "doi": "10.1109/CVPR46437.2021.00178", "title": "Learning monocular 3D reconstruction of articulated categories from motion", "normalizedTitle": "Learning monocular 3D reconstruction of articulated categories from motion", "abstract": "Monocular 3D reconstruction of articulated object categories is challenging due to the lack of training data and the inherent ill-posedness of the problem. In this work we use video self-supervision, forcing the consistency of consecutive 3D reconstructions by a motion-based cycle loss. This largely improves both optimization-based and learning-based 3D mesh reconstruction. We further introduce an interpretable model of 3D template deformations that controls a 3D surface through the displacement of a small number of local, learnable handles. We formulate this operation as a structured layer relying on meshlaplacian regularization and show that it can be trained in an end-to-end manner. We finally introduce a per-sample numerical optimisation approach that jointly optimises over mesh displacements and cameras within a video, boosting accuracy both for training and also as test time post-processing.While relying exclusively on a small set of videos collected per category for supervision, we obtain state-of-the-art reconstructions with diverse shapes, viewpoints and textures for multiple articulated object categories. 
Supplementary materials, code, and videos are provided on the project page: https://fkokkinos.github.io/video_3d_reconstruction/.", "abstracts": [ { "abstractType": "Regular", "content": "Monocular 3D reconstruction of articulated object categories is challenging due to the lack of training data and the inherent ill-posedness of the problem. In this work we use video self-supervision, forcing the consistency of consecutive 3D reconstructions by a motion-based cycle loss. This largely improves both optimization-based and learning-based 3D mesh reconstruction. We further introduce an interpretable model of 3D template deformations that controls a 3D surface through the displacement of a small number of local, learnable handles. We formulate this operation as a structured layer relying on meshlaplacian regularization and show that it can be trained in an end-to-end manner. We finally introduce a per-sample numerical optimisation approach that jointly optimises over mesh displacements and cameras within a video, boosting accuracy both for training and also as test time post-processing.While relying exclusively on a small set of videos collected per category for supervision, we obtain state-of-the-art reconstructions with diverse shapes, viewpoints and textures for multiple articulated object categories. Supplementary materials, code, and videos are provided on the project page: https://fkokkinos.github.io/video_3d_reconstruction/.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Monocular 3D reconstruction of articulated object categories is challenging due to the lack of training data and the inherent ill-posedness of the problem. In this work we use video self-supervision, forcing the consistency of consecutive 3D reconstructions by a motion-based cycle loss. This largely improves both optimization-based and learning-based 3D mesh reconstruction. 
We further introduce an interpretable model of 3D template deformations that controls a 3D surface through the displacement of a small number of local, learnable handles. We formulate this operation as a structured layer relying on meshlaplacian regularization and show that it can be trained in an end-to-end manner. We finally introduce a per-sample numerical optimisation approach that jointly optimises over mesh displacements and cameras within a video, boosting accuracy both for training and also as test time post-processing.While relying exclusively on a small set of videos collected per category for supervision, we obtain state-of-the-art reconstructions with diverse shapes, viewpoints and textures for multiple articulated object categories. Supplementary materials, code, and videos are provided on the project page: https://fkokkinos.github.io/video_3d_reconstruction/.", "fno": "450900b737", "keywords": [ "Cameras", "Feature Extraction", "Image Motion Analysis", "Image Reconstruction", "Image Representation", "Image Segmentation", "Image Sequences", "Learning Artificial Intelligence", "Medical Image Processing", "Mesh Generation", "Object Detection", "Optimisation", "Solid Modelling", "Stereo Image Processing", "Video Signal Processing", "Optimization Based", "3 D Mesh Reconstruction", "3 D Template Deformations", "State Of The Art Reconstructions", "Multiple Articulated Object Categories", "Monocular 3 D Reconstruction", "Articulated Categories", "Training Data", "Consecutive 3 D Reconstructions", "Motion Based Cycle Loss", "Deep Learning", "Training", "Surface Reconstruction", "Solid Modeling", "Three Dimensional Displays", "Shape", "Training Data" ], "authors": [ { "affiliation": "University College London", "fullName": "Filippos Kokkinos", "givenName": "Filippos", "surname": "Kokkinos", "__typename": "ArticleAuthorType" }, { "affiliation": "University College London", "fullName": "Iasonas Kokkinos", "givenName": "Iasonas", "surname": "Kokkinos", 
"__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-06-01T00:00:00", "pubType": "proceedings", "pages": "1737-1746", "year": "2021", "issn": null, "isbn": "978-1-6654-4509-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1yeHRRdgLbG", "name": "pcvpr202145090-09577348s1-mm_450900b737.zip", "size": "1.27 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202145090-09577348s1-mm_450900b737.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "450900b726", "articleId": "1yeIwV5gMHC", "__typename": "AdjacentArticleType" }, "next": { "fno": "450900b747", "articleId": "1yeLq17PHDG", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icpr/2012/2216/0/06460265", "title": "Camera-less articulated trajectory reconstruction", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460265/12OmNBOll2F", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2009/4420/0/05459403", "title": "Template-free monocular reconstruction of deformable surfaces", "doi": null, "abstractUrl": "/proceedings-article/iccv/2009/05459403/12OmNqJHFsI", "parentPublication": { "id": "proceedings/iccv/2009/4420/0", "title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2011/1101/0/06126243", "title": "3D reconstruction of a smooth articulated trajectory from a monocular image sequence", "doi": null, "abstractUrl": "/proceedings-article/iccv/2011/06126243/12OmNwwuDS6", "parentPublication": { "id": 
"proceedings/iccv/2011/1101/0", "title": "2011 IEEE International Conference on Computer Vision (ICCV 2011)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2001/1143/2/114320230", "title": "Uncalibrated Motion Capture Exploiting Articulated Structure Constraints", "doi": null, "abstractUrl": "/proceedings-article/iccv/2001/114320230/12OmNyYDDKC", "parentPublication": { "id": "proceedings/iccv/2001/1143/0", "title": "Computer Vision, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2016/08/07451280", "title": "3D Reconstruction of Human Motion from Monocular Image Sequences", "doi": null, "abstractUrl": "/journal/tp/2016/08/07451280/13rRUxYrbVS", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200f742", "title": "Neural Articulated Radiance Field", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200f742/1BmFoeDN55e", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200l1709", "title": "DensePose 3D: Lifting Canonical Surface Maps of Articulated Objects to the Third Dimension", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200l1709/1BmFyrOUHhS", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600n3180", "title": "Differentiable Dynamics for Articulated 3d Human Motion Reconstruction", "doi": null, 
"abstractUrl": "/proceedings-article/cvpr/2022/694600n3180/1H0LC9fRPBm", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600b496", "title": "Photorealistic Monocular 3D Reconstruction of Humans Wearing Clothing", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600b496/1H0LbGobg6A", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800a846", "title": "SG-NN: Sparse Generative Neural Networks for Self-Supervised Scene Completion of RGB-D Scans", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800a846/1m3nCYyhriw", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1yeHGyRsuys", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1yeKZKrc88w", "doi": "10.1109/CVPR46437.2021.01534", "title": "NeuralRecon: Real-Time Coherent 3D Reconstruction from Monocular Video", "normalizedTitle": "NeuralRecon: Real-Time Coherent 3D Reconstruction from Monocular Video", "abstract": "We present a novel framework named NeuralRecon for real-time 3D scene reconstruction from a monocular video. Unlike previous methods that estimate single-view depth maps separately on each key-frame and fuse them later, we propose to directly reconstruct local surfaces represented as sparse TSDF volumes for each video fragment sequentially by a neural network. A learning-based TSDF fusion module based on gated recurrent units is used to guide the network to fuse features from previous fragments. This de-sign allows the network to capture local smoothness prior and global shape prior of 3D surfaces when sequentially reconstructing the surfaces, resulting in accurate, coherent, and real-time surface reconstruction. The experiments on ScanNet and 7-Scenes datasets show that our system outperforms state-of-the-art methods in terms of both ac-curacy and speed. To the best of our knowledge, this is the first learning-based system that is able to reconstruct dense coherent 3D geometry in real-time. Code is available at the project page: https://zju3dv.github.io/neuralrecon/.", "abstracts": [ { "abstractType": "Regular", "content": "We present a novel framework named NeuralRecon for real-time 3D scene reconstruction from a monocular video. 
Unlike previous methods that estimate single-view depth maps separately on each key-frame and fuse them later, we propose to directly reconstruct local surfaces represented as sparse TSDF volumes for each video fragment sequentially by a neural network. A learning-based TSDF fusion module based on gated recurrent units is used to guide the network to fuse features from previous fragments. This de-sign allows the network to capture local smoothness prior and global shape prior of 3D surfaces when sequentially reconstructing the surfaces, resulting in accurate, coherent, and real-time surface reconstruction. The experiments on ScanNet and 7-Scenes datasets show that our system outperforms state-of-the-art methods in terms of both ac-curacy and speed. To the best of our knowledge, this is the first learning-based system that is able to reconstruct dense coherent 3D geometry in real-time. Code is available at the project page: https://zju3dv.github.io/neuralrecon/.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a novel framework named NeuralRecon for real-time 3D scene reconstruction from a monocular video. Unlike previous methods that estimate single-view depth maps separately on each key-frame and fuse them later, we propose to directly reconstruct local surfaces represented as sparse TSDF volumes for each video fragment sequentially by a neural network. A learning-based TSDF fusion module based on gated recurrent units is used to guide the network to fuse features from previous fragments. This de-sign allows the network to capture local smoothness prior and global shape prior of 3D surfaces when sequentially reconstructing the surfaces, resulting in accurate, coherent, and real-time surface reconstruction. The experiments on ScanNet and 7-Scenes datasets show that our system outperforms state-of-the-art methods in terms of both ac-curacy and speed. 
To the best of our knowledge, this is the first learning-based system that is able to reconstruct dense coherent 3D geometry in real-time. Code is available at the project page: https://zju3dv.github.io/neuralrecon/.", "fno": "450900p5593", "keywords": [ "Computational Geometry", "Image Fusion", "Image Reconstruction", "Image Representation", "Learning Artificial Intelligence", "Real Time Systems", "Recurrent Neural Nets", "Stereo Image Processing", "Video Signal Processing", "Dense Coherent 3 D Geometry", "Monocular Video", "Sparse TSDF Volumes", "Video Fragment", "Neural Network", "Gated Recurrent Units", "7 Scenes Dataset", "Real Time Coherent 3 D Reconstruction", "Real Time 3 D Scene Reconstruction", "Learning Based TSDF Fusion Module", "Real Time Surface Reconstruction", "Single View Depth Maps", "Neural Recon", "Feature Fusion", "Scan Net Dataset", "Geometry", "Training", "Surface Reconstruction", "Three Dimensional Displays", "Fuses", "Shape", "Streaming Media" ], "authors": [ { "affiliation": "Zhejiang University", "fullName": "Jiaming Sun", "givenName": "Jiaming", "surname": "Sun", "__typename": "ArticleAuthorType" }, { "affiliation": "Zhejiang University", "fullName": "Yiming Xie", "givenName": "Yiming", "surname": "Xie", "__typename": "ArticleAuthorType" }, { "affiliation": "Zhejiang University", "fullName": "Linghao Chen", "givenName": "Linghao", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": "Zhejiang University", "fullName": "Xiaowei Zhou", "givenName": "Xiaowei", "surname": "Zhou", "__typename": "ArticleAuthorType" }, { "affiliation": "Zhejiang University", "fullName": "Hujun Bao", "givenName": "Hujun", "surname": "Bao", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-06-01T00:00:00", "pubType": "proceedings", "pages": "15593-15602", "year": "2021", "issn": null, "isbn": "978-1-6654-4509-2", "notes": null, 
"notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "450900p5582", "articleId": "1yeIAW0lsLm", "__typename": "AdjacentArticleType" }, "next": { "fno": "450900p5603", "articleId": "1yeKvBK8SPe", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2009/4420/0/05459403", "title": "Template-free monocular reconstruction of deformable surfaces", "doi": null, "abstractUrl": "/proceedings-article/iccv/2009/05459403/12OmNqJHFsI", "parentPublication": { "id": "proceedings/iccv/2009/4420/0", "title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2012/06/06186734", "title": "Monocular 3D Reconstruction of Locally Textured Surfaces", "doi": null, "abstractUrl": "/journal/tp/2012/06/06186734/13rRUIIVllA", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2017/09/07575643", "title": "Dense Semantic 3D Reconstruction", "doi": null, "abstractUrl": "/journal/tp/2017/09/07575643/13rRUwInvzG", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2019/1975/0/197500b413", "title": "3D Reconstruction and Texture Optimization Using a Sparse Set of RGB-D Cameras", "doi": null, "abstractUrl": "/proceedings-article/wacv/2019/197500b413/18j8FdScGbe", "parentPublication": { "id": "proceedings/wacv/2019/1975/0", "title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200q6066", 
"title": "VolumeFusion: Deep Depth Fusion for 3D Scene Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200q6066/1BmEGapQz5e", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f595", "title": "SelfRecon: Self Reconstruction Your Digital Avatar from Monocular Video", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f595/1H0NDIYvq0g", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2021/08/09007740", "title": "Variational Level Set Evolution for Non-Rigid 3D Reconstruction From a Single Depth Camera", "doi": null, "abstractUrl": "/journal/tp/2021/08/09007740/1hGqrsQbjPO", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2019/2506/0/250600c876", "title": "IsMo-GAN: Adversarial Learning for Monocular Non-Rigid 3D Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2019/250600c876/1iTvt2EK7m0", "parentPublication": { "id": "proceedings/cvprw/2019/2506/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900i928", "title": "DI-Fusion: Online Implicit 3D Reconstruction with Deep Priors", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900i928/1yeLpskgFXi", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on 
Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2021/2688/0/268800a320", "title": "VoRTX: Volumetric 3D Reconstruction With Transformers for Voxelwise View Selection and Fusion", "doi": null, "abstractUrl": "/proceedings-article/3dv/2021/268800a320/1zWE7Pmgyly", "parentPublication": { "id": "proceedings/3dv/2021/2688/0", "title": "2021 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyKa5Tk", "title": "2008 IEEE International Conference on Multimedia and Expo (ICME)", "acronym": "icme", "groupId": "1000477", "volume": "0", "displayVolume": "0", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNvkYxa1", "doi": "10.1109/ICME.2008.4607743", "title": "Audio and video signatures for synchronization", "normalizedTitle": "Audio and video signatures for synchronization", "abstract": "We propose a framework based on signatures extracted from audio and video streams for automatically measuring and maintaining synchronization between the two streams. The audio signature is based on projections of a coarse representation of the spectrogram onto random vectors. The video signature is based on projections of a coarse representation of the difference image between two consecutive frames onto random vectors. The time alignment present at the signature generator between the two streams is recorded by combining audio and video signatures into a combined synchronization signature. At the detector after video and audio streams go through different processing operations, we extract the signatures again. The signatures extracted before and after processing from the audio and the video are compared independently using a Hamming distance based correlator to estimate the relative misalignment introduced due to processing in each of the streams. Then, the estimated relative misalignment between the audio and video streams is used to preserve the same alignment between the streams that was present before processing. Our experimental results show that we can achieve > 93.0% accuracy in synchronization.", "abstracts": [ { "abstractType": "Regular", "content": "We propose a framework based on signatures extracted from audio and video streams for automatically measuring and maintaining synchronization between the two streams. 
The audio signature is based on projections of a coarse representation of the spectrogram onto random vectors. The video signature is based on projections of a coarse representation of the difference image between two consecutive frames onto random vectors. The time alignment present at the signature generator between the two streams is recorded by combining audio and video signatures into a combined synchronization signature. At the detector after video and audio streams go through different processing operations, we extract the signatures again. The signatures extracted before and after processing from the audio and the video are compared independently using a Hamming distance based correlator to estimate the relative misalignment introduced due to processing in each of the streams. Then, the estimated relative misalignment between the audio and video streams is used to preserve the same alignment between the streams that was present before processing. Our experimental results show that we can achieve > 93.0% accuracy in synchronization.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose a framework based on signatures extracted from audio and video streams for automatically measuring and maintaining synchronization between the two streams. The audio signature is based on projections of a coarse representation of the spectrogram onto random vectors. The video signature is based on projections of a coarse representation of the difference image between two consecutive frames onto random vectors. The time alignment present at the signature generator between the two streams is recorded by combining audio and video signatures into a combined synchronization signature. At the detector after video and audio streams go through different processing operations, we extract the signatures again. 
The signatures extracted before and after processing from the audio and the video are compared independently using a Hamming distance based correlator to estimate the relative misalignment introduced due to processing in each of the streams. Then, the estimated relative misalignment between the audio and video streams is used to preserve the same alignment between the streams that was present before processing. Our experimental results show that we can achieve > 93.0% accuracy in synchronization.", "fno": "04607743", "keywords": [ "Audio Signal Processing", "Synchronisation", "Video Signal Processing", "Audio Signature", "Video Signatures", "Synchronization", "Spectrogram Onto Random Vectors", "Difference Image", "Consecutive Frames Onto Random Vectors", "Signature Generator", "Hamming Distance", "Streaming Media", "Feature Extraction", "Delay", "Hamming Distance", "Correlators", "Robustness", "Synchronization" ], "authors": [ { "affiliation": "Dolby Laboratories Inc, 100 Potrero Ave, San Francisco, CA, USA", "fullName": "Regunathan Radhakrishnan", "givenName": "Regunathan", "surname": "Radhakrishnan", "__typename": "ArticleAuthorType" }, { "affiliation": "Dolby Laboratories Inc, 100 Potrero Ave, San Francisco, CA, USA", "fullName": "Kent Terry", "givenName": "Kent", "surname": "Terry", "__typename": "ArticleAuthorType" }, { "affiliation": "Dolby Laboratories Inc, 100 Potrero Ave, San Francisco, CA, USA", "fullName": "Claus Bauer", "givenName": "Claus", "surname": "Bauer", "__typename": "ArticleAuthorType" } ], "idPrefix": "icme", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-06-01T00:00:00", "pubType": "proceedings", "pages": "", "year": "2008", "issn": "1945-7871", "isbn": "978-1-4244-2570-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "04607742", "articleId": "12OmNAYGluT", "__typename": "AdjacentArticleType" }, "next": 
{ "fno": "04607744", "articleId": "12OmNzEVRUG", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/wowmom/2011/0352/0/05986385", "title": "Distributed audio synchronization scheme using audio endpoint inWASNs", "doi": null, "abstractUrl": "/proceedings-article/wowmom/2011/05986385/12OmNqyUUKd", "parentPublication": { "id": "proceedings/wowmom/2011/0352/0", "title": "2011 IEEE International Symposium on a World of Wireless, Mobile and Multimedia Networks", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2004/8603/2/01394404", "title": "Scene retrieval with sign sequence matching based on video and audio features", "doi": null, "abstractUrl": "/proceedings-article/icme/2004/01394404/12OmNwkhTfy", "parentPublication": { "id": "proceedings/icme/2004/8603/2", "title": "2004 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2005/9331/0/01521400", "title": "Audio, video and audio-visual signatures for short video clip detection: experiments on Trecvid2003", "doi": null, "abstractUrl": "/proceedings-article/icme/2005/01521400/12OmNxR5UKg", "parentPublication": { "id": "proceedings/icme/2005/9331/0", "title": "2005 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icde/2016/2020/0/07498238", "title": "Hobbes3: Dynamic generation of variable-length signatures for efficient approximate subsequence mappings", "doi": null, "abstractUrl": "/proceedings-article/icde/2016/07498238/12OmNxvwp0F", "parentPublication": { "id": "proceedings/icde/2016/2020/0", "title": "2016 IEEE 32nd International Conference on Data Engineering (ICDE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/ftdcs/1993/4430/0/00344170", "title": "A taxonomy on multimedia synchronization", "doi": null, "abstractUrl": "/proceedings-article/ftdcs/1993/00344170/12OmNyTOslv", "parentPublication": { "id": "proceedings/ftdcs/1993/4430/0", "title": "1993 4th Workshop on Future Trends of Distributed Computing Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/1995/6935/0/69350594", "title": "Media synchronization protocols for packet audio-video system on multimedia information networks", "doi": null, "abstractUrl": "/proceedings-article/hicss/1995/69350594/12OmNyvoX9K", "parentPublication": { "id": "proceedings/hicss/1995/6935/0", "title": "28th Hawaii International Conference on System Sciences (HICSS'95)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2007/1016/0/04284763", "title": "An Efficient Audio-Video Synchronization Methodology", "doi": null, "abstractUrl": "/proceedings-article/icme/2007/04284763/12OmNzWfpaz", "parentPublication": { "id": "proceedings/icme/2007/1016/0", "title": "2007 International Conference on Multimedia & Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/snpd/2007/2909/3/290930026", "title": "New Synchronization Scheme between Audio and Video", "doi": null, "abstractUrl": "/proceedings-article/snpd/2007/290930026/12OmNzuIjs6", "parentPublication": { "id": "proceedings/snpd/2007/2909/3", "title": "Eighth ACIS International Conference on Software Engineering, Artificial Intelligence, Networking, and Parallel/Distributed Computing (SNPD 2007)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2015/0379/0/0379a128", "title": "Frame Synchronization of Live Video Streams Using Visible Light Communication", "doi": null, "abstractUrl": "/proceedings-article/ism/2015/0379a128/12OmNzwHv9q", "parentPublication": 
{ "id": "proceedings/ism/2015/0379/0", "title": "2015 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacvw/2023/2056/0/205600a565", "title": "A Simple and Efficient method for Dubbed Audio Sync Detection using Compressive Sensing", "doi": null, "abstractUrl": "/proceedings-article/wacvw/2023/205600a565/1KzyZw1a4AU", "parentPublication": { "id": "proceedings/wacvw/2023/2056/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision Workshops (WACVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAkEU4f", "title": "2011 IEEE International Conference on Multimedia and Expo", "acronym": "icme", "groupId": "1000477", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNzvz6I2", "doi": "10.1109/ICME.2011.6011841", "title": "Automatic consumer video summarization by audio and visual analysis", "normalizedTitle": "Automatic consumer video summarization by audio and visual analysis", "abstract": "Video summarization provides a condensed version of a video stream by analyzing the video content. Automatic summarization of consumer videos is an important tool that facilitates efficient browsing, searching, and album creation in large consumer video collections. This paper studies automatic video summarization in the consumer domain where most previous methods cannot be easily applied due to the challenging issues for content analysis, i.e., consumer videos are captured with uncontrolled conditions such as uneven illumination, clutter, and large camera motion, and with poor-quality soundtrack as a mix of multiple sound sources under severe noise. To pursue reliable summarization, a case study with actual consumer users is conducted, from which a set of consumer-oriented guidelines is obtained. The guidelines reflect the high-level semantic rules, in both visual and audio aspects, which are recognized by consumers as important to produce good video summaries. Following these guidelines, an automatic video summarization algorithm is developed where both visual and audio information are used to generate improved summaries. To the best of our knowledge, this is a first systematic study on automatic summarization of consumer-quality videos. Experimental evaluations from consumer subjects show the effectiveness of our approach.", "abstracts": [ { "abstractType": "Regular", "content": "Video summarization provides a condensed version of a video stream by analyzing the video content. 
Automatic summarization of consumer videos is an important tool that facilitates efficient browsing, searching, and album creation in large consumer video collections. This paper studies automatic video summarization in the consumer domain where most previous methods cannot be easily applied due to the challenging issues for content analysis, i.e., consumer videos are captured with uncontrolled conditions such as uneven illumination, clutter, and large camera motion, and with poor-quality soundtrack as a mix of multiple sound sources under severe noise. To pursue reliable summarization, a case study with actual consumer users is conducted, from which a set of consumer-oriented guidelines is obtained. The guidelines reflect the high-level semantic rules, in both visual and audio aspects, which are recognized by consumers as important to produce good video summaries. Following these guidelines, an automatic video summarization algorithm is developed where both visual and audio information are used to generate improved summaries. To the best of our knowledge, this is a first systematic study on automatic summarization of consumer-quality videos. Experimental evaluations from consumer subjects show the effectiveness of our approach.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Video summarization provides a condensed version of a video stream by analyzing the video content. Automatic summarization of consumer videos is an important tool that facilitates efficient browsing, searching, and album creation in large consumer video collections. This paper studies automatic video summarization in the consumer domain where most previous methods cannot be easily applied due to the challenging issues for content analysis, i.e., consumer videos are captured with uncontrolled conditions such as uneven illumination, clutter, and large camera motion, and with poor-quality soundtrack as a mix of multiple sound sources under severe noise. 
To pursue reliable summarization, a case study with actual consumer users is conducted, from which a set of consumer-oriented guidelines is obtained. The guidelines reflect the high-level semantic rules, in both visual and audio aspects, which are recognized by consumers as important to produce good video summaries. Following these guidelines, an automatic video summarization algorithm is developed where both visual and audio information are used to generate improved summaries. To the best of our knowledge, this is a first systematic study on automatic summarization of consumer-quality videos. Experimental evaluations from consumer subjects show the effectiveness of our approach.", "fno": "06011841", "keywords": [ "Visualization", "Face", "Speech", "Streaming Media", "Semantics", "Guidelines", "Image Color Analysis", "Video Summarization", "Consumer Domain", "Audio Summarization" ], "authors": [ { "affiliation": "Corporate Research and Engineering, Eastman Kodak Company, Rochester, NY, USA", "fullName": "Wei Jiang", "givenName": "Wei", "surname": "Jiang", "__typename": "ArticleAuthorType" }, { "affiliation": "Electrical Engineering, Columbia University, New York, USA", "fullName": "Courtenay Cotton", "givenName": "Courtenay", "surname": "Cotton", "__typename": "ArticleAuthorType" }, { "affiliation": "Corporate Research and Engineering, Eastman Kodak Company, Rochester, NY, USA", "fullName": "Alexander C. 
Loui", "givenName": "Alexander C.", "surname": "Loui", "__typename": "ArticleAuthorType" } ], "idPrefix": "icme", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-07-01T00:00:00", "pubType": "proceedings", "pages": "1-6", "year": "2011", "issn": "1945-7871", "isbn": "978-1-61284-348-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06011840", "articleId": "12OmNzd7bDp", "__typename": "AdjacentArticleType" }, "next": { "fno": "06011842", "articleId": "12OmNCcKQDK", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvprw/2012/1611/0/06239341", "title": "A consumer video search system by audio-visual concept classification", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2012/06239341/12OmNApLGue", "parentPublication": { "id": "proceedings/cvprw/2012/1611/0", "title": "2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2010/7491/0/05582561", "title": "Automatic summarization of audio-visual soccer feeds", "doi": null, "abstractUrl": "/proceedings-article/icme/2010/05582561/12OmNqIQS4W", "parentPublication": { "id": "proceedings/icme/2010/7491/0", "title": "2010 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2017/4822/0/07926698", "title": "Semantic Text Summarization of Long Videos", "doi": null, "abstractUrl": "/proceedings-article/wacv/2017/07926698/12OmNqNG3ga", "parentPublication": { "id": "proceedings/wacv/2017/4822/0", "title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/icme/2017/6067/0/08019530", "title": "Real time video summarization on mobile platform", "doi": null, "abstractUrl": "/proceedings-article/icme/2017/08019530/12OmNrJ11DG", "parentPublication": { "id": "proceedings/icme/2017/6067/0", "title": "2017 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/imis/2015/8873/0/8873a388", "title": "A New Uighur Automatic Summarization Method", "doi": null, "abstractUrl": "/proceedings-article/imis/2015/8873a388/12OmNwkhTkt", "parentPublication": { "id": "proceedings/imis/2015/8873/0", "title": "2015 9th International Conference on Innovative Mobile and Internet Services in Ubiquitous Computing (IMIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2015/0379/0/0379a249", "title": "Automatic Video Content Summarization Using Geospatial Mosaics of Aerial Imagery", "doi": null, "abstractUrl": "/proceedings-article/ism/2015/0379a249/12OmNyjcczn", "parentPublication": { "id": "proceedings/ism/2015/0379/0", "title": "2015 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2003/1950/1/195010104", "title": "Automatic Video Summarization by Graph Modeling", "doi": null, "abstractUrl": "/proceedings-article/iccv/2003/195010104/12OmNynJMPs", "parentPublication": { "id": "proceedings/iccv/2003/1950/1", "title": "Computer Vision, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cse/2014/7981/0/7981b758", "title": "Automatic Movie Summarization Based on the Visual-Audio Features", "doi": null, "abstractUrl": "/proceedings-article/cse/2014/7981b758/12OmNzkuKAD", "parentPublication": { "id": "proceedings/cse/2014/7981/0", "title": "2014 IEEE 17th International Conference on 
Computational Science and Engineering (CSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2014/5118/0/5118c513", "title": "Quasi Real-Time Summarization for Consumer Videos", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2014/5118c513/12OmNznCkYL", "parentPublication": { "id": "proceedings/cvpr/2014/5118/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2019/1975/0/197500a452", "title": "Demystifying Multi-Faceted Video Summarization: Tradeoff Between Diversity, Representation, Coverage and Importance", "doi": null, "abstractUrl": "/proceedings-article/wacv/2019/197500a452/18j8JkNH4TC", "parentPublication": { "id": "proceedings/wacv/2019/1975/0", "title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1BmEezmpGrm", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "acronym": "iccv", "groupId": "1000149", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1BmG7WKSuR2", "doi": "10.1109/ICCV48922.2021.01350", "title": "PIRenderer: Controllable Portrait Image Generation via Semantic Neural Rendering", "normalizedTitle": "PIRenderer: Controllable Portrait Image Generation via Semantic Neural Rendering", "abstract": "Generating portrait images by controlling the motions of existing faces is an important task of great consequence to social media industries. For easy use and intuitive control, semantically meaningful and fully disentangled parameters should be used as modifications. However, many existing techniques do not provide such fine-grained controls or use indirect editing methods i.e. mimic motions of other individuals. In this paper, a Portrait Image Neural Renderer (PIRenderer) is proposed to control the face motions with the parameters of three-dimensional morphable face models (3DMMs). The proposed model can generate photo-realistic portrait images with accurate movements according to intuitive modifications. Experiments on both direct and indirect editing tasks demonstrate the superiority of this model. Meanwhile, we further extend this model to tackle the audio-driven facial reenactment task by extracting sequential motions from audio inputs. We show that our model can generate coherent videos with convincing movements from only a single reference image and a driving audio stream. Our source code is available at https://github.com/RenYurui/PIRender.", "abstracts": [ { "abstractType": "Regular", "content": "Generating portrait images by controlling the motions of existing faces is an important task of great consequence to social media industries. 
For easy use and intuitive control, semantically meaningful and fully disentangled parameters should be used as modifications. However, many existing techniques do not provide such fine-grained controls or use indirect editing methods i.e. mimic motions of other individuals. In this paper, a Portrait Image Neural Renderer (PIRenderer) is proposed to control the face motions with the parameters of three-dimensional morphable face models (3DMMs). The proposed model can generate photo-realistic portrait images with accurate movements according to intuitive modifications. Experiments on both direct and indirect editing tasks demonstrate the superiority of this model. Meanwhile, we further extend this model to tackle the audio-driven facial reenactment task by extracting sequential motions from audio inputs. We show that our model can generate coherent videos with convincing movements from only a single reference image and a driving audio stream. Our source code is available at https://github.com/RenYurui/PIRender.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Generating portrait images by controlling the motions of existing faces is an important task of great consequence to social media industries. For easy use and intuitive control, semantically meaningful and fully disentangled parameters should be used as modifications. However, many existing techniques do not provide such fine-grained controls or use indirect editing methods i.e. mimic motions of other individuals. In this paper, a Portrait Image Neural Renderer (PIRenderer) is proposed to control the face motions with the parameters of three-dimensional morphable face models (3DMMs). The proposed model can generate photo-realistic portrait images with accurate movements according to intuitive modifications. Experiments on both direct and indirect editing tasks demonstrate the superiority of this model. 
Meanwhile, we further extend this model to tackle the audio-driven facial reenactment task by extracting sequential motions from audio inputs. We show that our model can generate coherent videos with convincing movements from only a single reference image and a driving audio stream. Our source code is available at https://github.com/RenYurui/PIRender.", "fno": "281200n3739", "keywords": [ "Industries", "Solid Modeling", "Social Networking Online", "Semantics", "Neural Networks", "Streaming Media", "Rendering Computer Graphics", "Image And Video Synthesis", "Computational Photography", "Faces", "Image And Video Manipulation Detection And Integrity Methods", "Vision Language" ], "authors": [ { "affiliation": "Peking University,School of Electronics and Computer Engineering", "fullName": "Yurui Ren", "givenName": "Yurui", "surname": "Ren", "__typename": "ArticleAuthorType" }, { "affiliation": "Peking University,School of Electronics and Computer Engineering", "fullName": "Ge Li", "givenName": "Ge", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": "Peking University,School of Electronics and Computer Engineering", "fullName": "Yuanqi Chen", "givenName": "Yuanqi", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": "Peking University,Advanced Institute of Information Technology", "fullName": "Thomas H. 
Li", "givenName": "Thomas H.", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": "Tencent America", "fullName": "Shan Liu", "givenName": "Shan", "surname": "Liu", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-10-01T00:00:00", "pubType": "proceedings", "pages": "13739-13748", "year": "2021", "issn": null, "isbn": "978-1-6654-2812-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "281200n3729", "articleId": "1BmEnviC1lm", "__typename": "AdjacentArticleType" }, "next": { "fno": "281200n3749", "articleId": "1BmKNCO62iI", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icme/2004/8603/1/01394118", "title": "Content based editing of semantic video metadata", "doi": null, "abstractUrl": "/proceedings-article/icme/2004/01394118/12OmNylKB5A", "parentPublication": { "id": "proceedings/icme/2004/8603/1", "title": "2004 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nicoint/2016/2305/0/2305a156", "title": "Artist-Drawing Inspired Automatic Sketch Portrait Rendering", "doi": null, "abstractUrl": "/proceedings-article/nicoint/2016/2305a156/12OmNylsZOF", "parentPublication": { "id": "proceedings/nicoint/2016/2305/0", "title": "2016 Nicograph International (NicoInt)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2018/01/mcg2018010077", "title": "Parametric Reshaping of Portrait Images for Weight-change", "doi": null, "abstractUrl": "/magazine/cg/2018/01/mcg2018010077/13rRUwciPh2", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/wacvw/2022/5824/0/582400a701", "title": "Multi-View Motion Synthesis via Applying Rotated Dual-Pixel Blur Kernels", "doi": null, "abstractUrl": "/proceedings-article/wacvw/2022/582400a701/1B12z383zI4", "parentPublication": { "id": "proceedings/wacvw/2022/5824/0", "title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision Workshops (WACVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200a782", "title": "Neural Video Portrait Relighting in Real-time via Consistency Modeling", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200a782/1BmEMHlM0Du", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09784910", "title": "DrawingInStyles: Portrait Image Generation and Editing with Spatially Conditioned StyleGAN", "doi": null, "abstractUrl": "/journal/tg/5555/01/09784910/1DPaE3QYx68", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600u0332", "title": "RigNeRF: Fully Controllable Neural 3D Portraits", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600u0332/1H1mHD2RrtS", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iucc-cit-dsci-smartcns/2022/7726/0/772600a106", "title": "Multi-dimensional User-sensitive Information Portrait for Social Networks", "doi": null, "abstractUrl": 
"/proceedings-article/iucc-cit-dsci-smartcns/2022/772600a106/1M4rdnpjccg", "parentPublication": { "id": "proceedings/iucc-cit-dsci-smartcns/2022/7726/0", "title": "2022 IEEE 21st International Conference on Ubiquitous Computing and Communications (IUCC/CIT/DSCI/SmartCNS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09199560", "title": "Photorealistic Audio-driven Video Portraits", "doi": null, "abstractUrl": "/journal/tg/2020/12/09199560/1ncguu1AZdS", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2021/4989/0/09455949", "title": "Multi-Style Artistic Portrait Drawing Generation", "doi": null, "abstractUrl": "/proceedings-article/icmew/2021/09455949/1uCgn6EsRLG", "parentPublication": { "id": "proceedings/icmew/2021/4989/0", "title": "2021 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1H1gVMlkl32", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1H0Lqq0HMVa", "doi": "10.1109/CVPR52688.2022.00341", "title": "Audio-driven Neural Gesture Reenactment with Video Motion Graphs", "normalizedTitle": "Audio-driven Neural Gesture Reenactment with Video Motion Graphs", "abstract": "Human speech is often accompanied by body gestures including arm and hand gestures. We present a method that reenacts a high-quality video with gestures matching a target speech audio. The key idea of our method is to split and re-assemble clips from a reference video through a novel video motion graph encoding valid transitions between clips. To seamlessly connect different clips in the reenactment, we propose a pose-aware video blending network which synthesizes video frames around the stitched frames between two clips. Moreover, we developed an audio-based gesture searching algorithm to find the optimal order of the reenacted frames. Our system generates reen-actments that are consistent with both the audio rhythms and the speech content. We evaluate our synthesized video quality quantitatively, qualitatively, and with user studies, demonstrating that our method produces videos of much higher quality and consistency with the target audio compared to previous work and baselines. Our project page https://github.com/yzhou359/vid-reenact includes code and data.", "abstracts": [ { "abstractType": "Regular", "content": "Human speech is often accompanied by body gestures including arm and hand gestures. We present a method that reenacts a high-quality video with gestures matching a target speech audio. The key idea of our method is to split and re-assemble clips from a reference video through a novel video motion graph encoding valid transitions between clips. 
To seamlessly connect different clips in the reenactment, we propose a pose-aware video blending network which synthesizes video frames around the stitched frames between two clips. Moreover, we developed an audio-based gesture searching algorithm to find the optimal order of the reenacted frames. Our system generates reen-actments that are consistent with both the audio rhythms and the speech content. We evaluate our synthesized video quality quantitatively, qualitatively, and with user studies, demonstrating that our method produces videos of much higher quality and consistency with the target audio compared to previous work and baselines. Our project page https://github.com/yzhou359/vid-reenact includes code and data.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Human speech is often accompanied by body gestures including arm and hand gestures. We present a method that reenacts a high-quality video with gestures matching a target speech audio. The key idea of our method is to split and re-assemble clips from a reference video through a novel video motion graph encoding valid transitions between clips. To seamlessly connect different clips in the reenactment, we propose a pose-aware video blending network which synthesizes video frames around the stitched frames between two clips. Moreover, we developed an audio-based gesture searching algorithm to find the optimal order of the reenacted frames. Our system generates reen-actments that are consistent with both the audio rhythms and the speech content. We evaluate our synthesized video quality quantitatively, qualitatively, and with user studies, demonstrating that our method produces videos of much higher quality and consistency with the target audio compared to previous work and baselines. 
Our project page https://github.com/yzhou359/vid-reenact includes code and data.", "fno": "694600d408", "keywords": [ "Gesture Recognition", "Video Signal Processing", "Audio Rhythms", "Speech Content", "Synthesized Video Quality", "Target Audio", "Audio Driven Neural Gesture Reenactment", "Video Motion Graph", "Human Speech", "Body Gestures", "Hand Gestures", "High Quality Video", "Target Speech Audio", "Re Assemble Clips", "Reference Video", "Different Clips", "Pose Aware Video Blending Network", "Video Frames", "Stitched Frames", "Reenacted Frames", "Computer Vision", "Codes", "Encoding", "Quality Assessment", "Pattern Recognition", "Video Recording" ], "authors": [ { "affiliation": "University of Massachusetts Amherst", "fullName": "Yang Zhou", "givenName": "Yang", "surname": "Zhou", "__typename": "ArticleAuthorType" }, { "affiliation": "Adobe Research", "fullName": "Jimei Yang", "givenName": "Jimei", "surname": "Yang", "__typename": "ArticleAuthorType" }, { "affiliation": "Adobe Research", "fullName": "Dingzeyu Li", "givenName": "Dingzeyu", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": "Adobe Research", "fullName": "Jun Saito", "givenName": "Jun", "surname": "Saito", "__typename": "ArticleAuthorType" }, { "affiliation": "Adobe Research", "fullName": "Deepali Aneja", "givenName": "Deepali", "surname": "Aneja", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Massachusetts Amherst", "fullName": "Evangelos Kalogerakis", "givenName": "Evangelos", "surname": "Kalogerakis", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-06-01T00:00:00", "pubType": "proceedings", "pages": "3408-3418", "year": "2022", "issn": null, "isbn": "978-1-6654-6946-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1H0LqlTwoZa", "name": "pcvpr202269460-09878525s1-mm_694600d408.zip", "size": 
"20.1 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09878525s1-mm_694600d408.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "694600d397", "articleId": "1H1l4HxIb8k", "__typename": "AdjacentArticleType" }, "next": { "fno": "694600d419", "articleId": "1H0N5arEqM8", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icsc/2009/3800/0/3800a509", "title": "Audio Clips Content Comparison Using Latent Semantic Indexing", "doi": null, "abstractUrl": "/proceedings-article/icsc/2009/3800a509/12OmNAJm0mf", "parentPublication": { "id": "proceedings/icsc/2009/3800/0", "title": "2009 IEEE International Conference on Semantic Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2017/2937/0/2937a338", "title": "A Bitrate-Conservative Fast-Adjusting Rate Controller for Video Conferencing", "doi": null, "abstractUrl": "/proceedings-article/ism/2017/2937a338/12OmNAYXWEO", "parentPublication": { "id": "proceedings/ism/2017/2937/0", "title": "2017 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/avss/2009/3718/0/3718a324", "title": "Cost-Effective Solution to Synchronized Audio-Visual Capture Using Multiple Sensors", "doi": null, "abstractUrl": "/proceedings-article/avss/2009/3718a324/12OmNApu5iP", "parentPublication": { "id": "proceedings/avss/2009/3718/0", "title": "2009 Sixth IEEE International Conference on Advanced Video and Signal Based Surveillance", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/snpd/2007/2909/2/290920091", "title": "Automatic Audio Classification and Speaker Identification for Video Content Analysis", "doi": null, "abstractUrl": "/proceedings-article/snpd/2007/290920091/12OmNAq3hws", "parentPublication": { 
"id": "proceedings/snpd/2007/2909/2", "title": "Eighth ACIS International Conference on Software Engineering, Artificial Intelligence, Networking, and Parallel/Distributed Computing (SNPD 2007)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2013/0015/0/06607515", "title": "Layered screen video coding leveraging hardware video codec", "doi": null, "abstractUrl": "/proceedings-article/icme/2013/06607515/12OmNBuL154", "parentPublication": { "id": "proceedings/icme/2013/0015/0", "title": "2013 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2005/9331/0/01521400", "title": "Audio, video and audio-visual signatures for short video clip detection: experiments on Trecvid2003", "doi": null, "abstractUrl": "/proceedings-article/icme/2005/01521400/12OmNxR5UKg", "parentPublication": { "id": "proceedings/icme/2005/9331/0", "title": "2005 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2017/2715/0/08258175", "title": "Towards automated quality curation of video collections from a realistic perspective", "doi": null, "abstractUrl": "/proceedings-article/big-data/2017/08258175/17D45VVho2q", "parentPublication": { "id": "proceedings/big-data/2017/2715/0", "title": "2017 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200l1273", "title": "Audio2Gestures: Generating Diverse Gestures from Speech Audio with Conditional Variational Autoencoders", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200l1273/1BmF4nGRwEU", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600k0452", "title": "Learning Hierarchical Cross-Modal Association for Co-Speech Gesture Generation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600k0452/1H0O6vyTMKk", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600f167", "title": "Audio-Visual Face Reenactment", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600f167/1KxUt9lzkKQ", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1H1gVMlkl32", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1H1kcv3MPL2", "doi": "10.1109/CVPR52688.2022.00603", "title": "Volumetric Bundle Adjustment for Online Photorealistic Scene Capture", "normalizedTitle": "Volumetric Bundle Adjustment for Online Photorealistic Scene Capture", "abstract": "Efficient photorealistic scene capture is a challenging task. Current online reconstruction systems can operate very efficiently, but images generated from the models captured by these systems are often not photorealistic. Recent approaches based on neural volume rendering can render novel views at high fidelity, but they often require a long time to train, making them impractical for applications that require real-time scene capture. In this paper, we propose a system that can reconstruct photorealistic models of complex scenes in an efficient manner. Our system processes images online, i.e. it can obtain a good quality estimate of both the scene geometry and appearance at roughly the same rate the video is captured. To achieve the efficiency, we propose a hierarchical feature volume using VDB grids. This representation is memory efficient and allows for fast querying of the scene information. Secondly, we introduce a novel optimization technique that improves the efficiency of the bundle adjustment which allows our system to converge to the target camera poses and scene geometry much faster. Experiments on real-world scenes show that our method outperforms existing systems in terms of efficiency and capture quality. To the best of our knowledge, this is the first method that can achieve online photorealistic scene capture.", "abstracts": [ { "abstractType": "Regular", "content": "Efficient photorealistic scene capture is a challenging task. 
Current online reconstruction systems can operate very efficiently, but images generated from the models captured by these systems are often not photorealistic. Recent approaches based on neural volume rendering can render novel views at high fidelity, but they often require a long time to train, making them impractical for applications that require real-time scene capture. In this paper, we propose a system that can reconstruct photorealistic models of complex scenes in an efficient manner. Our system processes images online, i.e. it can obtain a good quality estimate of both the scene geometry and appearance at roughly the same rate the video is captured. To achieve the efficiency, we propose a hierarchical feature volume using VDB grids. This representation is memory efficient and allows for fast querying of the scene information. Secondly, we introduce a novel optimization technique that improves the efficiency of the bundle adjustment which allows our system to converge to the target camera poses and scene geometry much faster. Experiments on real-world scenes show that our method outperforms existing systems in terms of efficiency and capture quality. To the best of our knowledge, this is the first method that can achieve online photorealistic scene capture.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Efficient photorealistic scene capture is a challenging task. Current online reconstruction systems can operate very efficiently, but images generated from the models captured by these systems are often not photorealistic. Recent approaches based on neural volume rendering can render novel views at high fidelity, but they often require a long time to train, making them impractical for applications that require real-time scene capture. In this paper, we propose a system that can reconstruct photorealistic models of complex scenes in an efficient manner. Our system processes images online, i.e. 
it can obtain a good quality estimate of both the scene geometry and appearance at roughly the same rate the video is captured. To achieve the efficiency, we propose a hierarchical feature volume using VDB grids. This representation is memory efficient and allows for fast querying of the scene information. Secondly, we introduce a novel optimization technique that improves the efficiency of the bundle adjustment which allows our system to converge to the target camera poses and scene geometry much faster. Experiments on real-world scenes show that our method outperforms existing systems in terms of efficiency and capture quality. To the best of our knowledge, this is the first method that can achieve online photorealistic scene capture.", "fno": "694600g114", "keywords": [ "Cameras", "Image Reconstruction", "Optimisation", "Realistic Images", "Rendering Computer Graphics", "Solid Modelling", "Volumetric Bundle Adjustment", "Online Photorealistic Scene Capture", "Efficient Photorealistic Scene Capture", "Current Online Reconstruction Systems", "Neural Volume Rendering", "Real Time Scene Capture", "Photorealistic Models", "Complex Scenes", "Scene Geometry", "Memory Efficient", "Scene Information", "Real World Scenes", "Bundle Adjustment", "Geometry", "Streaming Media", "Rendering Computer Graphics", "Cameras", "Real Time Systems", "Topology" ], "authors": [ { "affiliation": "Imperial College London", "fullName": "Ronald Clark", "givenName": "Ronald", "surname": "Clark", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-06-01T00:00:00", "pubType": "proceedings", "pages": "6114-6122", "year": "2022", "issn": null, "isbn": "978-1-6654-6946-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1H1kcouK3ew", "name": "pcvpr202269460-09879659s1-mm_694600g114.zip", "size": "1.74 MB", "location": 
"https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09879659s1-mm_694600g114.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "694600g104", "articleId": "1H1mN1CfOWA", "__typename": "AdjacentArticleType" }, "next": { "fno": "694600g123", "articleId": "1H1liGT3jaM", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/3dv/2017/2610/0/261001a175", "title": "Fast Incremental Bundle Adjustment with Covariance Recovery", "doi": null, "abstractUrl": "/proceedings-article/3dv/2017/261001a175/12OmNAqCtOp", "parentPublication": { "id": "proceedings/3dv/2017/2610/0", "title": "2017 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2012/1226/0/181P2A31", "title": "Rolling shutter bundle adjustment", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2012/181P2A31/12OmNAsk4zp", "parentPublication": { "id": "proceedings/cvpr/2012/1226/0", "title": "2012 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2001/1143/2/114320644", "title": "Model-Based Bundle Adjustment with Application to Face Modeling", "doi": null, "abstractUrl": "/proceedings-article/iccv/2001/114320644/12OmNC4eSlP", "parentPublication": { "id": "proceedings/iccv/2001/1143/0", "title": "Computer Vision, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2004/2128/3/212830902", "title": "3D Model Reconstruction by Constrained Bundle Adjustment", "doi": null, "abstractUrl": "/proceedings-article/icpr/2004/212830902/12OmNCvLXXS", "parentPublication": { "id": "proceedings/icpr/2004/2128/3", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/cvpr/2011/0394/0/05995552", "title": "Multicore bundle adjustment", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2011/05995552/12OmNyQGS4T", "parentPublication": { "id": "proceedings/cvpr/2011/0394/0", "title": "CVPR 2011", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109a290", "title": "Fast Odometry Integration in Local Bundle Adjustment-Based Visual SLAM", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109a290/12OmNzcPAf8", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fccm/2019/1131/0/113100a100", "title": "π-BA: Bundle Adjustment Acceleration on Embedded FPGAs with Co-observation Optimization", "doi": null, "abstractUrl": "/proceedings-article/fccm/2019/113100a100/1aPv1CATXTW", "parentPublication": { "id": "proceedings/fccm/2019/1131/0", "title": "2019 IEEE 27th Annual International Symposium on Field-Programmable Custom Computing Machines (FCCM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/02/09151354", "title": "Spatiotemporal Bundle Adjustment for Dynamic 3D Human Reconstruction in the Wild", "doi": null, "abstractUrl": "/journal/tp/2022/02/09151354/1lPCkW5UbPG", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2020/8128/0/812800a663", "title": "Visualizing Spectral Bundle Adjustment Uncertainty", "doi": null, "abstractUrl": "/proceedings-article/3dv/2020/812800a663/1qyxkDucGpG", "parentPublication": { "id": "proceedings/3dv/2020/8128/0", "title": "2020 International Conference on 3D Vision (3DV)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900a424", "title": "Rotation-Only Bundle Adjustment", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900a424/1yeKdnywUaQ", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1hQqfuoOyHu", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "acronym": "iccv", "groupId": "1000149", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1hVl8HQ7A9q", "doi": "10.1109/ICCV.2019.00794", "title": "Learning Perspective Undistortion of Portraits", "normalizedTitle": "Learning Perspective Undistortion of Portraits", "abstract": "Near-range portrait photographs often contain perspective distortion artifacts that bias human perception and challenge both facial recognition and reconstruction techniques. We present the first deep learning based approach to remove such artifacts from unconstrained portraits. In contrast to the previous state-of-the-art approach [23], our method handles even portraits with extreme perspective distortion, as we avoid the inaccurate and error-prone step of first fitting a 3D face model. Instead, we predict a distortion correction flow map that encodes a per-pixel displacement that removes distortion artifacts when applied to the input image. Our method also automatically infers missing facial features, i.e. occluded ears caused by strong perspective distortion, with coherent details. We demonstrate that our approach significantly outperforms the previous state-of-the-art [23] both qualitatively and quantitatively, particularly for portraits with extreme perspective distortion or facial expressions. We further show that our technique benefits a number of fundamental tasks, significantly improving the accuracy of both face recognition and 3D reconstruction and enables a novel camera calibration technique from a single portrait. 
Moreover, we also build the first perspective portrait database with a large diversity in identities, expression and poses.", "abstracts": [ { "abstractType": "Regular", "content": "Near-range portrait photographs often contain perspective distortion artifacts that bias human perception and challenge both facial recognition and reconstruction techniques. We present the first deep learning based approach to remove such artifacts from unconstrained portraits. In contrast to the previous state-of-the-art approach [23], our method handles even portraits with extreme perspective distortion, as we avoid the inaccurate and error-prone step of first fitting a 3D face model. Instead, we predict a distortion correction flow map that encodes a per-pixel displacement that removes distortion artifacts when applied to the input image. Our method also automatically infers missing facial features, i.e. occluded ears caused by strong perspective distortion, with coherent details. We demonstrate that our approach significantly outperforms the previous state-of-the-art [23] both qualitatively and quantitatively, particularly for portraits with extreme perspective distortion or facial expressions. We further show that our technique benefits a number of fundamental tasks, significantly improving the accuracy of both face recognition and 3D reconstruction and enables a novel camera calibration technique from a single portrait. Moreover, we also build the first perspective portrait database with a large diversity in identities, expression and poses.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Near-range portrait photographs often contain perspective distortion artifacts that bias human perception and challenge both facial recognition and reconstruction techniques. We present the first deep learning based approach to remove such artifacts from unconstrained portraits. 
In contrast to the previous state-of-the-art approach [23], our method handles even portraits with extreme perspective distortion, as we avoid the inaccurate and error-prone step of first fitting a 3D face model. Instead, we predict a distortion correction flow map that encodes a per-pixel displacement that removes distortion artifacts when applied to the input image. Our method also automatically infers missing facial features, i.e. occluded ears caused by strong perspective distortion, with coherent details. We demonstrate that our approach significantly outperforms the previous state-of-the-art [23] both qualitatively and quantitatively, particularly for portraits with extreme perspective distortion or facial expressions. We further show that our technique benefits a number of fundamental tasks, significantly improving the accuracy of both face recognition and 3D reconstruction and enables a novel camera calibration technique from a single portrait. Moreover, we also build the first perspective portrait database with a large diversity in identities, expression and poses.", "fno": "480300h848", "keywords": [ "Face Recognition", "Image Reconstruction", "Learning Artificial Intelligence", "Neural Nets", "Solid Modelling", "Perspective Undistortion", "Near Range Portrait Photographs", "Perspective Distortion Artifacts", "Facial Recognition", "Reconstruction Techniques", "Deep Learning", "Unconstrained Portraits", "Extreme Perspective Distortion", "3 D Face Model", "Distortion Correction Flow Map", "Facial Features", "Facial Expressions", "Face Recognition", "Perspective Portrait Database", "Perspective Distortion", "Camera Calibration Technique", "Cameras", "Distortion", "Face", "Three Dimensional Displays", "Image Reconstruction", "Solid Modeling" ], "authors": [ { "affiliation": "Institution for Creative Technologies. 
University of Southern California", "fullName": "Yajie Zhao", "givenName": "Yajie", "surname": "Zhao", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Southern California", "fullName": "Zeng Huang", "givenName": "Zeng", "surname": "Huang", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Southern California", "fullName": "Tianye Li", "givenName": "Tianye", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": "USC Institute for Creative Technology", "fullName": "Weikai Chen", "givenName": "Weikai", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": "USC Institute for Creative Technology", "fullName": "Chloe Legendre", "givenName": "Chloe", "surname": "Legendre", "__typename": "ArticleAuthorType" }, { "affiliation": "Institution for Creative Technologies. University of Southern California", "fullName": "Xinglei Ren", "givenName": "Xinglei", "surname": "Ren", "__typename": "ArticleAuthorType" }, { "affiliation": "USC Institute for Creative Technologies", "fullName": "Ari Shapiro", "givenName": "Ari", "surname": "Shapiro", "__typename": "ArticleAuthorType" }, { "affiliation": "Pinscreen/University of Southern California/USC ICT", "fullName": "Hao Li", "givenName": "Hao", "surname": "Li", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-10-01T00:00:00", "pubType": "proceedings", "pages": "7848-7858", "year": "2019", "issn": null, "isbn": "978-1-7281-4803-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "480300h838", "articleId": "1hVlKlnB79e", "__typename": "AdjacentArticleType" }, "next": { "fno": "480300h859", "articleId": "1hVlDgdiU2A", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvprw/2017/0733/0/0733b813", 
"title": "Position Determines Perspective: Investigating Perspective Distortion for Image Forensics of Faces", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2017/0733b813/12OmNC8dgf7", "parentPublication": { "id": "proceedings/cvprw/2017/0733/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2015/6759/0/07301314", "title": "Perspective distortion modeling, learning and compensation", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2015/07301314/12OmNCvumRJ", "parentPublication": { "id": "proceedings/cvprw/2015/6759/0", "title": "2015 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2015/6026/1/07163086", "title": "The beauty of capturing faces: Rating the quality of digital portraits", "doi": null, "abstractUrl": "/proceedings-article/fg/2015/07163086/12OmNwErpM1", "parentPublication": { "id": "proceedings/fg/2015/6026/5", "title": "2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2015/6026/1/07163149", "title": "Correcting radial and perspective distortion by using face shape information", "doi": null, "abstractUrl": "/proceedings-article/fg/2015/07163149/12OmNyKJilp", "parentPublication": { "id": "proceedings/fg/2015/6026/5", "title": "2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2018/3788/0/08545037", "title": "Deep Learning-based Face Recognition and the Robustness to Perspective Distortion", "doi": null, "abstractUrl": 
"/proceedings-article/icpr/2018/08545037/17D45WHONrq", "parentPublication": { "id": "proceedings/icpr/2018/3788/0", "title": "2018 24th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600t9657", "title": "Semi-Supervised Wide-Angle Portraits Correction by Multi-Scale Transformer", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600t9657/1H1n2dj52eI", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/5555/01/10005833", "title": "Monocular 3D Fingerprint Reconstruction and Unwarping", "doi": null, "abstractUrl": "/journal/tp/5555/01/10005833/1JF3RNqTuNy", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09199560", "title": "Photorealistic Audio-driven Video Portraits", "doi": null, "abstractUrl": "/journal/tg/2020/12/09199560/1ncguu1AZdS", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/11/09540284", "title": "Robust and Accurate 3D Self-Portraits in Seconds", "doi": null, "abstractUrl": "/journal/tp/2022/11/09540284/1wWCcQDdEZi", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900d497", "title": "Practical Wide-Angle Portraits Correction with Deep Structured Models", "doi": null, "abstractUrl": 
"/proceedings-article/cvpr/2021/450900d497/1yeKI0xPTb2", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1tmhi3ly74c", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "acronym": "icpr", "groupId": "1000545", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1tmiEASmsGQ", "doi": "10.1109/ICPR48806.2021.9412380", "title": "Coherence and Identity Learning for Arbitrary-length Face Video Generation", "normalizedTitle": "Coherence and Identity Learning for Arbitrary-length Face Video Generation", "abstract": "Face synthesis is an interesting yet challenging task in computer vision. It is even much harder to generate a portrait video than a single image. In this paper, we propose a novel video generation framework for synthesizing arbitrary-length face videos without any face exemplar or landmark. To overcome the synthesis ambiguity of face video, we propose a divide-and-conquer strategy to separately address the video face synthesis problem from two aspects, face identity synthesis and rearrangement. To this end, we design a cascaded network which contains three components, Identity-aware GAN (IA-GAN), Face Coherence Network, and Interpolation Network. IA-GAN is proposed to synthesize photorealistic faces with the same identity from a set of noises. Face Coherence Network is designed to re-arrange the faces generated by IA-GAN while keeping the inter-frame coherence. Interpolation Network is introduced to eliminate the discontinuity between two adjacent frames and improve the smoothness of the face video. Experimental results demonstrate that our proposed network is able to generate face video with high visual quality while preserving the identity. Statistics show that our method outperforms state-of-the-art unconditional face video generative models in multiple challenging datasets.", "abstracts": [ { "abstractType": "Regular", "content": "Face synthesis is an interesting yet challenging task in computer vision. 
It is even much harder to generate a portrait video than a single image. In this paper, we propose a novel video generation framework for synthesizing arbitrary-length face videos without any face exemplar or landmark. To overcome the synthesis ambiguity of face video, we propose a divide-and-conquer strategy to separately address the video face synthesis problem from two aspects, face identity synthesis and rearrangement. To this end, we design a cascaded network which contains three components, Identity-aware GAN (IA-GAN), Face Coherence Network, and Interpolation Network. IA-GAN is proposed to synthesize photorealistic faces with the same identity from a set of noises. Face Coherence Network is designed to re-arrange the faces generated by IA-GAN while keeping the inter-frame coherence. Interpolation Network is introduced to eliminate the discontinuity between two adjacent frames and improve the smoothness of the face video. Experimental results demonstrate that our proposed network is able to generate face video with high visual quality while preserving the identity. Statistics show that our method outperforms state-of-the-art unconditional face video generative models in multiple challenging datasets.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Face synthesis is an interesting yet challenging task in computer vision. It is even much harder to generate a portrait video than a single image. In this paper, we propose a novel video generation framework for synthesizing arbitrary-length face videos without any face exemplar or landmark. To overcome the synthesis ambiguity of face video, we propose a divide-and-conquer strategy to separately address the video face synthesis problem from two aspects, face identity synthesis and rearrangement. To this end, we design a cascaded network which contains three components, Identity-aware GAN (IA-GAN), Face Coherence Network, and Interpolation Network. 
IA-GAN is proposed to synthesize photorealistic faces with the same identity from a set of noises. Face Coherence Network is designed to re-arrange the faces generated by IA-GAN while keeping the inter-frame coherence. Interpolation Network is introduced to eliminate the discontinuity between two adjacent frames and improve the smoothness of the face video. Experimental results demonstrate that our proposed network is able to generate face video with high visual quality while preserving the identity. Statistics show that our method outperforms state-of-the-art unconditional face video generative models in multiple challenging datasets.", "fno": "09412380", "keywords": [ "Computer Vision", "Face Recognition", "Learning Artificial Intelligence", "Video Signal Processing", "Portrait Video", "Video Generation Framework", "Arbitrary Length Face Videos", "Face Exemplar", "Video Face Synthesis Problem", "Face Identity Synthesis", "Rearrangement", "Identity Aware GAN", "IA GAN", "Face Coherence Network", "Interpolation Network", "Photorealistic Faces", "State Of The Art Unconditional Face Video Generative Models", "Arbitrary Length Face Video Generation", "Interpolation", "Visualization", "Computer Vision", "Face Recognition", "Coherence", "Generative Adversarial Networks", "Generators" ], "authors": [ { "affiliation": "City University of Hong Kong,Department of Computer Science", "fullName": "Shuquan Ye", "givenName": "Shuquan", "surname": "Ye", "__typename": "ArticleAuthorType" }, { "affiliation": "Guangdong Provincial People's Hospital Guangdong Academy of Medical Sciences,Department of Radiology", "fullName": "Chu Han", "givenName": "Chu", "surname": "Han", "__typename": "ArticleAuthorType" }, { "affiliation": "City University of Hong Kong,Department of Computer Science", "fullName": "Jiaying Lin", "givenName": "Jiaying", "surname": "Lin", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Computer Science and Engineering, South China University of 
Technology", "fullName": "Guoqiang Han", "givenName": "Guoqiang", "surname": "Han", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Computer Science and Engineering, South China University of Technology", "fullName": "Shengfeng He", "givenName": "Shengfeng", "surname": "He", "__typename": "ArticleAuthorType" } ], "idPrefix": "icpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-01-01T00:00:00", "pubType": "proceedings", "pages": "915-922", "year": "2021", "issn": "1051-4651", "isbn": "978-1-7281-8808-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09412242", "articleId": "1tmikTAVIYM", "__typename": "AdjacentArticleType" }, "next": { "fno": "09412522", "articleId": "1tmiuHfzD1K", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2018/6420/0/642000g713", "title": "Towards Open-Set Identity Preserving Face Synthesis", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000g713/17D45VUZMYT", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000i398", "title": "Pose-Guided Photorealistic Face Rotation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000i398/17D45VsBTWD", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000a821", "title": "FaceID-GAN: Learning a Symmetry Three-Player GAN for Identity-Preserving Face Synthesis", "doi": null, "abstractUrl": 
"/proceedings-article/cvpr/2018/642000a821/17D45Xh13pk", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2019/0089/0/08756527", "title": "Matching Thermal to Visible Face Images Using a Semantic-Guided Generative Adversarial Network", "doi": null, "abstractUrl": "/proceedings-article/fg/2019/08756527/1bzYu44KpnW", "parentPublication": { "id": "proceedings/fg/2019/0089/0", "title": "2019 14th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2019)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093275", "title": "Cross-Domain Face Synthesis using a Controllable GAN", "doi": null, "abstractUrl": "/proceedings-article/wacv/2020/09093275/1jPbyOXsJvW", "parentPublication": { "id": "proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2020/3079/0/307900a024", "title": "Dual-Attention GAN for Large-Pose Face Frontalization", "doi": null, "abstractUrl": "/proceedings-article/fg/2020/307900a024/1kecHPwIBLa", "parentPublication": { "id": "proceedings/fg/2020/3079/0/", "title": "2020 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020) (FG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2020/9360/0/09151060", "title": "Triple-GAN: Progressive Face Aging with Triple Translation Loss", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2020/09151060/1lPHg6ccqac", "parentPublication": { "id": "proceedings/cvprw/2020/9360/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800h082", "title": "Learning Identity-Invariant Motion Representations for Cross-ID Face Reenactment", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800h082/1m3nhQ6ft9S", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ictai/2020/9228/0/922800a647", "title": "Dialog Driven Face Construction using GANs", "doi": null, "abstractUrl": "/proceedings-article/ictai/2020/922800a647/1pP3uWLzXLq", "parentPublication": { "id": "proceedings/ictai/2020/9228/0", "title": "2020 IEEE 32nd International Conference on Tools with Artificial Intelligence (ICTAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2021/8808/0/09413167", "title": "Identity-Preserved Face Beauty Transformation with Conditional Generative Adversarial Networks", "doi": null, "abstractUrl": "/proceedings-article/icpr/2021/09413167/1tmjmrPHrJS", "parentPublication": { "id": "proceedings/icpr/2021/8808/0", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1uqGdWlamUo", "title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)", "acronym": "wacv", "groupId": "1000040", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1uqGhhWte3S", "doi": "10.1109/WACV48630.2021.00113", "title": "Real-time Localized Photorealistic Video Style Transfer", "normalizedTitle": "Real-time Localized Photorealistic Video Style Transfer", "abstract": "We present a novel algorithm for transferring artistic styles of semantically meaningful local regions of an image onto local regions of a target video while preserving its photorealism. Local regions may be selected either fully automatically from an image, through using video segmentation algorithms, or from casual user guidance such as scribbles. Our method, based on a deep neural network architecture inspired by recent work in photorealistic style transfer, is real-time and works on arbitrary inputs without runtime optimization once trained on a diverse dataset of artistic styles. By augmenting our video dataset with noisy semantic labels and jointly optimizing over style, content, mask, and temporal losses, our method can cope with a variety of imperfections in the input and produce temporally coherent videos without visual artifacts. We demonstrate our method on a variety of style images and target videos, including the ability to transfer different styles onto multiple objects simultaneously, and smoothly transition between styles in time.", "abstracts": [ { "abstractType": "Regular", "content": "We present a novel algorithm for transferring artistic styles of semantically meaningful local regions of an image onto local regions of a target video while preserving its photorealism. Local regions may be selected either fully automatically from an image, through using video segmentation algorithms, or from casual user guidance such as scribbles. 
Our method, based on a deep neural network architecture inspired by recent work in photorealistic style transfer, is real-time and works on arbitrary inputs without runtime optimization once trained on a diverse dataset of artistic styles. By augmenting our video dataset with noisy semantic labels and jointly optimizing over style, content, mask, and temporal losses, our method can cope with a variety of imperfections in the input and produce temporally coherent videos without visual artifacts. We demonstrate our method on a variety of style images and target videos, including the ability to transfer different styles onto multiple objects simultaneously, and smoothly transition between styles in time.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a novel algorithm for transferring artistic styles of semantically meaningful local regions of an image onto local regions of a target video while preserving its photorealism. Local regions may be selected either fully automatically from an image, through using video segmentation algorithms, or from casual user guidance such as scribbles. Our method, based on a deep neural network architecture inspired by recent work in photorealistic style transfer, is real-time and works on arbitrary inputs without runtime optimization once trained on a diverse dataset of artistic styles. By augmenting our video dataset with noisy semantic labels and jointly optimizing over style, content, mask, and temporal losses, our method can cope with a variety of imperfections in the input and produce temporally coherent videos without visual artifacts. 
We demonstrate our method on a variety of style images and target videos, including the ability to transfer different styles onto multiple objects simultaneously, and smoothly transition between styles in time.", "fno": "047700b088", "keywords": [ "Deep Learning Artificial Intelligence", "Image Colour Analysis", "Image Segmentation", "Neural Net Architecture", "Real Time Systems", "Video Signal Processing", "Video Segmentation", "Deep Neural Network Architecture", "Photorealistic Style Transfer", "Runtime Optimization", "Real Time Localized Photorealistic Video Style Transfer", "Image Regions", "Video Regions", "Visualization", "Photorealism", "Runtime", "Semantics", "Transforms", "Streaming Media", "Real Time Systems" ], "authors": [ { "affiliation": "Boston University", "fullName": "Xide Xia", "givenName": "Xide", "surname": "Xia", "__typename": "ArticleAuthorType" }, { "affiliation": "Google Research", "fullName": "Tianfan Xue", "givenName": "Tianfan", "surname": "Xue", "__typename": "ArticleAuthorType" }, { "affiliation": "Google", "fullName": "Wei-sheng Lai", "givenName": "Wei-sheng", "surname": "Lai", "__typename": "ArticleAuthorType" }, { "affiliation": "Google Research", "fullName": "Zheng Sun", "givenName": "Zheng", "surname": "Sun", "__typename": "ArticleAuthorType" }, { "affiliation": "Google", "fullName": "Abby Chang", "givenName": "Abby", "surname": "Chang", "__typename": "ArticleAuthorType" }, { "affiliation": "Boston University", "fullName": "Brian Kulis", "givenName": "Brian", "surname": "Kulis", "__typename": "ArticleAuthorType" }, { "affiliation": "Google Research", "fullName": "Jiawen Chen", "givenName": "Jiawen", "surname": "Chen", "__typename": "ArticleAuthorType" } ], "idPrefix": "wacv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-01-01T00:00:00", "pubType": "proceedings", "pages": "1088-1097", "year": "2021", "issn": null, "isbn": "978-1-6654-0477-8", "notes": null, "notesType": 
null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "047700b078", "articleId": "1uqGpgDC9P2", "__typename": "AdjacentArticleType" }, "next": { "fno": "047700b098", "articleId": "1uqGL0y86dy", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2017/0457/0/0457h178", "title": "Multimodal Transfer: A Hierarchical Deep Convolutional Neural Network for Fast Artistic Style Transfer", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457h178/12OmNxxvAQq", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2011/348/0/06011840", "title": "Extraction and representation of human body for pitching style recognition in broadcast baseball video", "doi": null, "abstractUrl": "/proceedings-article/icme/2011/06011840/12OmNzd7bDp", "parentPublication": { "id": "proceedings/icme/2011/348/0", "title": "2011 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200o4589", "title": "Domain-Aware Universal Style Transfer", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200o4589/1BmEW5hrQNW", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600h683", "title": "Pastiche Master: Exemplar-Based High-Resolution Portrait Style Transfer", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600h683/1H0NNPChQsM", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer 
Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300d804", "title": "Learning Linear Transformations for Fast Image and Video Style Transfer", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300d804/1gysdlZwwxi", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300j035", "title": "Photorealistic Style Transfer via Wavelet Transforms", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300j035/1hQqmpD9dy8", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2019/5023/0/502300d189", "title": "Class-Based Styling: Real-Time Localized Style Transfer with Semantic Segmentation", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2019/502300d189/1i5mBjqvedq", "parentPublication": { "id": "proceedings/iccvw/2019/5023/0", "title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093420", "title": "Fast Video Multi-Style Transfer", "doi": null, "abstractUrl": "/proceedings-article/wacv/2020/09093420/1jPbo2cNSE0", "parentPublication": { "id": "proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2020/9274/0/927400a017", "title": "SelfieArt: Interactive Multi-Style Transfer for Selfies and Videos with Soft Transitions", 
"doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2020/927400a017/1p2VA30rt16", "parentPublication": { "id": "proceedings/sibgrapi/2020/9274/0", "title": "2020 33rd SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900a872", "title": "DualAST: Dual Style-Learning Networks for Artistic Style Transfer", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900a872/1yeKFMpqICs", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1uqGdWlamUo", "title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)", "acronym": "wacv", "groupId": "1000040", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1uqGiEGRWQ8", "doi": "10.1109/WACV48630.2021.00009", "title": "Audio- and Gaze-driven Facial Animation of Codec Avatars", "normalizedTitle": "Audio- and Gaze-driven Facial Animation of Codec Avatars", "abstract": "Codec Avatars are a recent class of learned, photorealistic face models that accurately represent the geometry and texture of a person in 3D (i.e., for virtual reality), and are almost indistinguishable from video [28]. In this paper we describe the first approach to animate these parametric models in real-time which could be deployed on commodity virtual reality hardware using audio and/or eye tracking. Our goal is to display expressive conversations between individuals that exhibit important social signals such as laughter and excitement solely from la-tent cues in our lossy input signals. To this end we collected over 5 hours of high frame rate 3D face scans across three participants including traditional neutral speech as well as expressive and conversational speech. We investigate a multimodal fusion approach that dynamically identifies which sensor encoding should animate which parts of the face at any time. See the supplemental video which demonstrates our ability to generate full face motion far beyond the typically neutral lip articulations seen in competing work: https://research.fb.com/videos/audio-and-gaze-driven-facial-animation-of-codec-avatars/", "abstracts": [ { "abstractType": "Regular", "content": "Codec Avatars are a recent class of learned, photorealistic face models that accurately represent the geometry and texture of a person in 3D (i.e., for virtual reality), and are almost indistinguishable from video [28]. 
In this paper we describe the first approach to animate these parametric models in real-time which could be deployed on commodity virtual reality hardware using audio and/or eye tracking. Our goal is to display expressive conversations between individuals that exhibit important social signals such as laughter and excitement solely from la-tent cues in our lossy input signals. To this end we collected over 5 hours of high frame rate 3D face scans across three participants including traditional neutral speech as well as expressive and conversational speech. We investigate a multimodal fusion approach that dynamically identifies which sensor encoding should animate which parts of the face at any time. See the supplemental video which demonstrates our ability to generate full face motion far beyond the typically neutral lip articulations seen in competing work: https://research.fb.com/videos/audio-and-gaze-driven-facial-animation-of-codec-avatars/", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Codec Avatars are a recent class of learned, photorealistic face models that accurately represent the geometry and texture of a person in 3D (i.e., for virtual reality), and are almost indistinguishable from video [28]. In this paper we describe the first approach to animate these parametric models in real-time which could be deployed on commodity virtual reality hardware using audio and/or eye tracking. Our goal is to display expressive conversations between individuals that exhibit important social signals such as laughter and excitement solely from la-tent cues in our lossy input signals. To this end we collected over 5 hours of high frame rate 3D face scans across three participants including traditional neutral speech as well as expressive and conversational speech. We investigate a multimodal fusion approach that dynamically identifies which sensor encoding should animate which parts of the face at any time. 
See the supplemental video which demonstrates our ability to generate full face motion far beyond the typically neutral lip articulations seen in competing work: https://research.fb.com/videos/audio-and-gaze-driven-facial-animation-of-codec-avatars/", "fno": "047700a041", "keywords": [ "Avatars", "Computer Animation", "Face Recognition", "Realistic Images", "Solid Modelling", "Gaze Driven Facial Animation", "Codec Avatars", "Learned Face Models", "Photorealistic Face Models", "Parametric Models", "Commodity Virtual Reality Hardware", "Expressive Conversations", "Social Signals", "Lossy Input Signals", "High Frame Rate 3 D", "Traditional Neutral Speech", "Expressive Speech", "Conversational Speech", "Multimodal Fusion Approach", "Supplemental Video", "Face Motion", "Audio Driven Facial Animation", "Time 5 0 Hour", "Solid Modeling", "Three Dimensional Displays", "Codecs", "Lips", "Avatars", "Streaming Media", "Real Time Systems" ], "authors": [ { "affiliation": "Facebook Reality Labs", "fullName": "Alexander Richard", "givenName": "Alexander", "surname": "Richard", "__typename": "ArticleAuthorType" }, { "affiliation": "Facebook Reality Labs", "fullName": "Colin Lea", "givenName": "Colin", "surname": "Lea", "__typename": "ArticleAuthorType" }, { "affiliation": "Facebook Reality Labs", "fullName": "Shugao Ma", "givenName": "Shugao", "surname": "Ma", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Bonn", "fullName": "Juergen Gall", "givenName": "Juergen", "surname": "Gall", "__typename": "ArticleAuthorType" }, { "affiliation": "Facebook Reality Labs", "fullName": "Fernando de la Torre", "givenName": "Fernando de", "surname": "la Torre", "__typename": "ArticleAuthorType" }, { "affiliation": "Facebook Reality Labs", "fullName": "Yaser Sheikh", "givenName": "Yaser", "surname": "Sheikh", "__typename": "ArticleAuthorType" } ], "idPrefix": "wacv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": 
"2021-01-01T00:00:00", "pubType": "proceedings", "pages": "41-50", "year": "2021", "issn": null, "isbn": "978-1-6654-0477-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "047700a032", "articleId": "1uqGGaQQkwg", "__typename": "AdjacentArticleType" }, "next": { "fno": "047700a051", "articleId": "1uqGCr3WDbW", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2014/2871/0/06802113", "title": "Automatic acquisition and animation of virtual avatars", "doi": null, "abstractUrl": "/proceedings-article/vr/2014/06802113/12OmNCeaQ1Z", "parentPublication": { "id": "proceedings/vr/2014/2871/0", "title": "2014 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892240", "title": "Rapid one-shot acquisition of dynamic VR avatars", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892240/12OmNwGZNLp", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2009/3943/0/04811044", "title": "Crafting Personalized Facial Avatars Using Editable Portrait and Photograph Example", "doi": null, "abstractUrl": "/proceedings-article/vr/2009/04811044/12OmNx7ouYf", "parentPublication": { "id": "proceedings/vr/2009/3943/0", "title": "2009 IEEE Virtual Reality Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2013/4795/0/06549379", "title": "Head motion animation using avatar gaze space", "doi": null, "abstractUrl": "/proceedings-article/vr/2013/06549379/12OmNxRWI3d", "parentPublication": { "id": "proceedings/vr/2013/4795/0", "title": "2013 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892372", "title": "Demonstration: Rapid one-shot acquisition of dynamic VR avatars", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892372/12OmNz2C1zq", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600n3535", "title": "I M Avatar: Implicit Morphable Head Avatars from Videos", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600n3535/1H1j2BWBE2c", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a666", "title": "Investigating User Embodiment of Inverse-Kinematic Avatars in Smartphone Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a666/1JrR5i5jDhe", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600f167", "title": "Audio-Visual Face Reenactment", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600f167/1KxUt9lzkKQ", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a438", "title": "MAGIC: Manipulating Avatars and Gestures to Improve Remote Collaboration", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a438/1MNgEQ9uIBW", "parentPublication": { "id": "proceedings/vr/2023/4815/0", 
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900a064", "title": "Pixel Codec Avatars", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900a064/1yeMmobLlwQ", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1CJcAaH6aYg", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "acronym": "vrw", "groupId": "1836626", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1CJcLeKILw4", "doi": "10.1109/VRW55335.2022.00108", "title": "Subjective and Objective Analyses of Collaboration and Co-Presence in a Virtual Reality Remote Environment", "normalizedTitle": "Subjective and Objective Analyses of Collaboration and Co-Presence in a Virtual Reality Remote Environment", "abstract": "Remote collaboration in virtual reality has gained attention and proved to be a viable solution for providing effective collaboration environments for physically distant collaborators. This study compares head-mounted display (HMD)- and computer-based remote collaboration solutions that allow users to interact with each other through immersive environments. Analyzing remote collaboration in immersive environments requires understanding group interactions and personal experiences. For this purpose, a 3D object assembly task was performed by 10 participants using self-reported surveys and physiological measures to investigate the effectiveness of collaboration from the users' perspective. The results showed that the HMD-based remote collaboration in a virtual reality environment increased the sense of co-presence among the users.", "abstracts": [ { "abstractType": "Regular", "content": "Remote collaboration in virtual reality has gained attention and proved to be a viable solution for providing effective collaboration environments for physically distant collaborators. This study compares head-mounted display (HMD)- and computer-based remote collaboration solutions that allow users to interact with each other through immersive environments. Analyzing remote collaboration in immersive environments requires understanding group interactions and personal experiences. 
For this purpose, a 3D object assembly task was performed by 10 participants using self-reported surveys and physiological measures to investigate the effectiveness of collaboration from the users' perspective. The results showed that the HMD-based remote collaboration in a virtual reality environment increased the sense of co-presence among the users.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Remote collaboration in virtual reality has gained attention and proved to be a viable solution for providing effective collaboration environments for physically distant collaborators. This study compares head-mounted display (HMD)- and computer-based remote collaboration solutions that allow users to interact with each other through immersive environments. Analyzing remote collaboration in immersive environments requires understanding group interactions and personal experiences. For this purpose, a 3D object assembly task was performed by 10 participants using self-reported surveys and physiological measures to investigate the effectiveness of collaboration from the users' perspective. 
The results showed that the HMD-based remote collaboration in a virtual reality environment increased the sense of co-presence among the users.", "fno": "840200a485", "keywords": [ "Groupware", "Helmet Mounted Displays", "Virtual Reality", "Head Mounted Display", "Computer Based Remote Collaboration Solutions", "Group Interactions", "3 D Object Assembly Task", "HMD Based Remote Collaboration", "Virtual Reality Remote Environment", "Physically Distant Collaborators", "Self Reported Surveys", "Social Computing", "Three Dimensional Displays", "Head Mounted Displays", "Conferences", "Collaboration", "Virtual Reality", "Resists", "Virtual Environment", "Virtual Reality", "Co Presence", "Remote Collaboration", "Physiology", "Human Centered Computing Human Computer Interaction HCI Interaction Paradigms Mixed Augmented Reality", "Human Centered Computing Human Computer Interaction HCI Interaction Paradigms Collaborative Interaction", "Human Centered Computing Collaborative And Social Computing Collaborative And Social Computing Theory", "Concept And Paradigms Computer Supported Cooperative Work" ], "authors": [ { "affiliation": "University of Illinois at Chicago", "fullName": "Allison Bayro", "givenName": "Allison", "surname": "Bayro", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Illinois at Chicago", "fullName": "Yalda Ghasemi", "givenName": "Yalda", "surname": "Ghasemi", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Illinois at Chicago", "fullName": "Heejin Jeong", "givenName": "Heejin", "surname": "Jeong", "__typename": "ArticleAuthorType" } ], "idPrefix": "vrw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-03-01T00:00:00", "pubType": "proceedings", "pages": "485-487", "year": "2022", "issn": null, "isbn": "978-1-6654-8402-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": 
"840200a483", "articleId": "1CJfsQRBRRK", "__typename": "AdjacentArticleType" }, "next": { "fno": "840200a488", "articleId": "1CJf1ozbD3i", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2017/6647/0/07892320", "title": "A mixed reality tele-presence platform to exchange emotion and sensory information based on MPEG-V standard", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892320/12OmNxUdv7D", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446345", "title": "Investigating a Sparse Peripheral Display in a Head-Mounted Display for VR Locomotion", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446345/13bd1fZBGbI", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a940", "title": "[DC] Improving Multi-User Interaction for Mixed Reality Telecollaboration", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a940/1CJelpv0Txm", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a238", "title": "Elicitation of Interaction Techniques with 3D Data Visualizations in Immersive Environment using HMDs", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a238/1J7W8EXefza", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality 
Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a812", "title": "AR-HMD Multitask Viewing System Concept with a Supporting Handheld Viewport for Multiple Spatially-Anchored Workspaces", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a812/1J7WvwZew9O", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797966", "title": "A Mixed Presence Collaborative Mixed Reality System", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797966/1cJ19fldjVu", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a649", "title": "Comparing World and Screen Coordinate Systems in Optical See-Through Head-Mounted Displays for Text Readability while Walking", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a649/1pysvKFdazS", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a665", "title": "Supporting Medical Auxiliary Work: The Central Sterile Services Department as a Challenging Environment for Augmented Reality Applications", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a665/1pysyCXzE8o", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a542", "title": "Field of View Effect on Distance Perception in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a542/1tnXQ9aew80", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a532", "title": "TeleGate: Immersive Multi-User Collaboration for Mixed Reality 360°Video", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a532/1tnXy7NpnGg", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1J7W6LmbCw0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "9973799", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1J7WdNbgEFy", "doi": "10.1109/ISMAR-Adjunct57072.2022.00161", "title": "Effects of Avatar Face Level of Detail Control on Social Presence in Augmented Reality Remote Collaboration", "normalizedTitle": "Effects of Avatar Face Level of Detail Control on Social Presence in Augmented Reality Remote Collaboration", "abstract": "This study explored how avatar face Level of Detail (LOD) and task types affect users' social presence and interpersonal communication in Augmented Reality remote collaboration. In AR remote collaboration, users are represented as avatars, and an actual user's appearance inevitably changes. While certain situations require an avatar that resembles a user, it is challenging to reconstruct everything realistically due to its high cost. Since facial parts show relative significance compared to body parts, we proposed controlling face LOD as an approach to lowering the cost while maintaining social presence in AR environments. Our results showed that while Mid-LOD maintains social presence but reduces costs compared to High-LOD, Low-LOD negatively impacts social presence mainly due to emotional understanding. 
Based on our findings, we highlight the importance of 1) considering facial expressions and emotional understanding in managing face LOD, 2) figuring out the collaboration context with different effects of tasks and communication behaviors, and 3) providing additional communication cues for maintaining social presence while lowering the cost.", "abstracts": [ { "abstractType": "Regular", "content": "This study explored how avatar face Level of Detail (LOD) and task types affect users' social presence and interpersonal communication in Augmented Reality remote collaboration. In AR remote collaboration, users are represented as avatars, and an actual user's appearance inevitably changes. While certain situations require an avatar that resembles a user, it is challenging to reconstruct everything realistically due to its high cost. Since facial parts show relative significance compared to body parts, we proposed controlling face LOD as an approach to lowering the cost while maintaining social presence in AR environments. Our results showed that while Mid-LOD maintains social presence but reduces costs compared to High-LOD, Low-LOD negatively impacts social presence mainly due to emotional understanding. Based on our findings, we highlight the importance of 1) considering facial expressions and emotional understanding in managing face LOD, 2) figuring out the collaboration context with different effects of tasks and communication behaviors, and 3) providing additional communication cues for maintaining social presence while lowering the cost.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This study explored how avatar face Level of Detail (LOD) and task types affect users' social presence and interpersonal communication in Augmented Reality remote collaboration. In AR remote collaboration, users are represented as avatars, and an actual user's appearance inevitably changes. 
While certain situations require an avatar that resembles a user, it is challenging to reconstruct everything realistically due to its high cost. Since facial parts show relative significance compared to body parts, we proposed controlling face LOD as an approach to lowering the cost while maintaining social presence in AR environments. Our results showed that while Mid-LOD maintains social presence but reduces costs compared to High-LOD, Low-LOD negatively impacts social presence mainly due to emotional understanding. Based on our findings, we highlight the importance of 1) considering facial expressions and emotional understanding in managing face LOD, 2) figuring out the collaboration context with different effects of tasks and communication behaviors, and 3) providing additional communication cues for maintaining social presence while lowering the cost.", "fno": "536500a763", "keywords": [ "Avatars", "Computer Aided Instruction", "Computer Mediated Communication", "Face Recognition", "Groupware", "Human Computer Interaction", "Social Aspects Of Automation", "Actual User", "AR Remote Collaboration", "Augmented Reality Remote Collaboration", "Avatar Face Level", "Face LOD", "High LOD", "Interpersonal Communication", "Level Of Detail", "Low LOD", "Mid LOD", "Social Presence", "Task Types", "Costs", "Avatars", "Collaboration", "Mouth", "Particle Measurements", "Behavioral Sciences", "Task Analysis", "Human Centered Computing", "Human Computer Interaction HCI", "Interaction Paradigms", "Mixed Augmented Reality", "HCI Design And Evaluation Methods", "User Studies" ], "authors": [ { "affiliation": "KAIST UVR Lab", "fullName": "Seoyoung Kang", "givenName": "Seoyoung", "surname": "Kang", "__typename": "ArticleAuthorType" }, { "affiliation": "KAIST UVR Lab", "fullName": "Boram Yoon", "givenName": "Boram", "surname": "Yoon", "__typename": "ArticleAuthorType" }, { "affiliation": "KAIST VCL", "fullName": "Bowon Kim", "givenName": "Bowon", "surname": "Kim", "__typename": 
"ArticleAuthorType" }, { "affiliation": "KAIST UVR Lab", "fullName": "Woontack Woo", "givenName": "Woontack", "surname": "Woo", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-10-01T00:00:00", "pubType": "proceedings", "pages": "763-767", "year": "2022", "issn": null, "isbn": "978-1-6654-5365-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "536500a757", "articleId": "1J7Wbl37JtK", "__typename": "AdjacentArticleType" }, "next": { "fno": "536500a768", "articleId": "1J7WxsKP0Ry", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/irc/2017/6724/0/07926508", "title": "Gutsy-Avatar: Computational Assimilation for Advanced Communication and Collaboration", "doi": null, "abstractUrl": "/proceedings-article/irc/2017/07926508/12OmNBLdKLX", "parentPublication": { "id": "proceedings/irc/2017/6724/0", "title": "2017 First IEEE International Conference on Robotic Computing (IRC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2017/6716/0/07893357", "title": "Influence of avatar appearance on presence in social VR", "doi": null, "abstractUrl": "/proceedings-article/3dui/2017/07893357/12OmNwwuDSr", "parentPublication": { "id": "proceedings/3dui/2017/6716/0", "title": "2017 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446261", "title": "The Influence of Avatar Representation and Behavior on Communication in Social Immersive Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446261/13bd1gCd7T2", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual 
Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a613", "title": "Evaluating Modifying Teacher Avatar Clip Sequencing Based on Eye-Tracked Visual Attention in Educational VR", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a613/1J7WepoS2w8", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10049676", "title": "The Impact of Avatar and Environment Congruence on Plausibility, Embodiment, Presence, and the Proteus Effect in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2023/05/10049676/1KYosbnM8q4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798044", "title": "Effect of Full Body Avatar in Augmented Reality Remote Collaboration", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798044/1cJ14GMFJdK", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797719", "title": "The Effect of Avatar Appearance on Social Presence in an Augmented Reality Remote Collaboration", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797719/1cJ1dVsXQDS", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09382845", 
"title": "The Influence of Avatar Representation on Interpersonal Communication in Virtual Social Environments", "doi": null, "abstractUrl": "/journal/tg/2021/05/09382845/1saZq7bIPUQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a135", "title": "Towards Avatars for Remote Communication using Mobile Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a135/1tnY3zC32KI", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800a139", "title": "VR Collaboration in Large Companies: An Interview Study on the Role of Avatars", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a139/1yeQK6CDe3C", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cI6akLvAuQ", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cJ0NzKEIjS", "doi": "10.1109/VR.2019.8798203", "title": "[DC] Case-studies of Contemporary Presence Theory: Towards More Objective and Reliable Measures of Presence", "normalizedTitle": "[DC] Case-studies of Contemporary Presence Theory: Towards More Objective and Reliable Measures of Presence", "abstract": "A large body of literature is concerned with models of presence-the sensory illusion of being part of a virtual scene-but there is still no general agreement on how to measure it in an objective and reliable way. When it comes to virtual reality, presence is often considered as one of the main factors contributing to quality of experience, yet existing methods either rely on subjective assessments of users or on specifics of the virtual environment they are applied in, making it difficult for experimental procedures to be generalized. This paper presents ideas for research into promising measures of presence, based on first experiments with novel behavioral measures inside a rich environment which users can feel present in more naturally.", "abstracts": [ { "abstractType": "Regular", "content": "A large body of literature is concerned with models of presence-the sensory illusion of being part of a virtual scene-but there is still no general agreement on how to measure it in an objective and reliable way. When it comes to virtual reality, presence is often considered as one of the main factors contributing to quality of experience, yet existing methods either rely on subjective assessments of users or on specifics of the virtual environment they are applied in, making it difficult for experimental procedures to be generalized. 
This paper presents ideas for research into promising measures of presence, based on first experiments with novel behavioral measures inside a rich environment which users can feel present in more naturally.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A large body of literature is concerned with models of presence-the sensory illusion of being part of a virtual scene-but there is still no general agreement on how to measure it in an objective and reliable way. When it comes to virtual reality, presence is often considered as one of the main factors contributing to quality of experience, yet existing methods either rely on subjective assessments of users or on specifics of the virtual environment they are applied in, making it difficult for experimental procedures to be generalized. This paper presents ideas for research into promising measures of presence, based on first experiments with novel behavioral measures inside a rich environment which users can feel present in more naturally.", "fno": "08798203", "keywords": [ "Behavioural Sciences", "Virtual Reality", "Contemporary Presence Theory", "Objective Measures", "Reliable Measures", "General Agreement", "Virtual Reality", "Quality Of Experience", "Virtual Environment", "Behavioral Measures", "Sensory Illusion", "Virtual Scene", "Atmospheric Measurements", "Particle Measurements", "Virtual Environments", "Games", "Real Time Systems", "Reliability", "Human Centered Computing", "Human Computer Interaction HCI", "HCI Design And Evaluation Methods", "User Studies", "Human Centered Computing", "Human Computer Interaction HCI", "Interaction Paradigms", "Virtual Reality" ], "authors": [ { "affiliation": "Sheffield Hallam University, Reutlingen University", "fullName": "Johannes Schirm", "givenName": "Johannes", "surname": "Schirm", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": 
"2019-03-01T00:00:00", "pubType": "proceedings", "pages": "1363-1364", "year": "2019", "issn": null, "isbn": "978-1-7281-1377-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08797893", "articleId": "1cJ0NJAEGQw", "__typename": "AdjacentArticleType" }, "next": { "fno": "08798147", "articleId": "1cJ0HhK5ANW", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2017/6647/0/07892315", "title": "The effect of geometric realism on presence in a virtual reality game", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892315/12OmNBTawwY", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itag/2014/6795/0/6795a013", "title": "How Body Movement Influences Virtual Reality Analgesia?", "doi": null, "abstractUrl": "/proceedings-article/itag/2014/6795a013/12OmNC4wtBe", "parentPublication": { "id": "proceedings/itag/2014/6795/0", "title": "2014 International Conference on Interactive Technologies and Games (iTAG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892344", "title": "Immersion and coherence in a visual cliff environment", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892344/12OmNy50gc9", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446235", "title": "Influences on the Elicitation of Interpersonal Space with Virtual Humans", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446235/13bd1eW2l9F", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on 
Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a538", "title": "CardsVR: A Two-Person VR Experience with Passive Haptic Feedback from a Deck of Playing Cards", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a538/1JrRaySJ7So", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797870", "title": "The Influence of Body Position on Presence When Playing a Virtual Reality Game", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797870/1cJ0RyhQnC0", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089623", "title": "Asymmetric Effects of the Ebbinghaus Illusion on Depth Judgments", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089623/1jIx9JY7KHS", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09206143", "title": "Spatial Presence, Performance, and Behavior between Real, Remote, and Virtual Immersive Environments", "doi": null, "abstractUrl": "/journal/tg/2020/12/09206143/1npxM6fDN7i", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/08/09293401", "title": "Self-Illusion: A Study on Cognition of Role-Playing in Immersive Virtual Environments", 
"doi": null, "abstractUrl": "/journal/tg/2022/08/09293401/1pyonpfZjoY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a474", "title": "A Neurophysiological Approach for Measuring Presence in Immersive Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a474/1pysuR65ESQ", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNrAdsuf", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNxYtucC", "doi": "10.1109/ISMAR.2015.44", "title": "[POSTER] An Adaptive Augmented Reality Interface for Hand Based on Probabilistic Approach", "normalizedTitle": "[POSTER] An Adaptive Augmented Reality Interface for Hand Based on Probabilistic Approach", "abstract": "In this paper we propose an adaptive Augmented Reality interface for hand gestures based on a probabilistic model. The proposed method provides an in-situ interface and the corresponding functionalities by recognizing a context of hand shape and gesture which requires the accurate recognition of static and dynamic hand states. We present an appearance-based hand feature representation that yields robustness against hand shape variations, and a feature extraction method based on the fingertip likelihood from a GMM model. Experimental results show that both context-sensitivity and accurate hand gesture recognition are achieved throughout the quantitative evaluation and its implementation as a three-in-one virtual interface.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper we propose an adaptive Augmented Reality interface for hand gestures based on a probabilistic model. The proposed method provides an in-situ interface and the corresponding functionalities by recognizing a context of hand shape and gesture which requires the accurate recognition of static and dynamic hand states. We present an appearance-based hand feature representation that yields robustness against hand shape variations, and a feature extraction method based on the fingertip likelihood from a GMM model. 
Experimental results show that both context-sensitivity and accurate hand gesture recognition are achieved throughout the quantitative evaluation and its implementation as a three-in-one virtual interface.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper we propose an adaptive Augmented Reality interface for hand gestures based on a probabilistic model. The proposed method provides an in-situ interface and the corresponding functionalities by recognizing a context of hand shape and gesture which requires the accurate recognition of static and dynamic hand states. We present an appearance-based hand feature representation that yields robustness against hand shape variations, and a feature extraction method based on the fingertip likelihood from a GMM model. Experimental results show that both context-sensitivity and accurate hand gesture recognition are achieved throughout the quantitative evaluation and its implementation as a three-in-one virtual interface.", "fno": "7660a152", "keywords": [ "Shape", "Estimation", "Gesture Recognition", "Accuracy", "Robustness", "Augmented Reality", "Adaptation Models" ], "authors": [ { "affiliation": null, "fullName": "Jinki Jung", "givenName": "Jinki", "surname": "Jung", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Hyeopwoo Lee", "givenName": "Hyeopwoo", "surname": "Lee", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Hyun Seung Yang", "givenName": "Hyun Seung", "surname": "Yang", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-09-01T00:00:00", "pubType": "proceedings", "pages": "152-155", "year": "2015", "issn": null, "isbn": "978-1-4673-7660-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "7660a148", "articleId": "12OmNyKJicb", "__typename": 
"AdjacentArticleType" }, "next": { "fno": "7660a156", "articleId": "12OmNCcKQqU", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2011/2183/0/06162914", "title": "Bare-hand-based augmented reality interface on mobile phone", "doi": null, "abstractUrl": "/proceedings-article/ismar/2011/06162914/12OmNAoUT6D", "parentPublication": { "id": "proceedings/ismar/2011/2183/0", "title": "2011 10th IEEE International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2013/6097/0/06550212", "title": "Poster: Markerless fingertip-based 3D interaction for handheld augmented reality in a small workspace", "doi": null, "abstractUrl": "/proceedings-article/3dui/2013/06550212/12OmNBsue2b", "parentPublication": { "id": "proceedings/3dui/2013/6097/0", "title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/intetain/2015/0061/0/07325483", "title": "A preliminary study of a hybrid user interface for augmented reality applications", "doi": null, "abstractUrl": "/proceedings-article/intetain/2015/07325483/12OmNBtl1pV", "parentPublication": { "id": "proceedings/intetain/2015/0061/0", "title": "2015 7th International Conference on Intelligent Technologies for Interactive Entertainment (INTETAIN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2002/1492/0/14920287", "title": "Tinmith-Hand: Unified User Interface Technology for Mobile Outdoor Augmented Reality and Indoor Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2002/14920287/12OmNqH9htu", "parentPublication": { "id": "proceedings/vr/2002/1492/0", "title": "Proceedings IEEE Virtual Reality 2002", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { 
"id": "proceedings/fg/2002/1602/0/16020429", "title": "Real-Time Tracking of Multiple Fingertips and Gesture Recognition for Augmented Desk Interface Systems", "doi": null, "abstractUrl": "/proceedings-article/fg/2002/16020429/12OmNvnfkcc", "parentPublication": { "id": "proceedings/fg/2002/1602/0", "title": "Proceedings of Fifth IEEE International Conference on Automatic Face Gesture Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/gcis/2013/2885/0/06805920", "title": "Hand Gesture Recognition Based on Fingertip Detection", "doi": null, "abstractUrl": "/proceedings-article/gcis/2013/06805920/12OmNwGIcAb", "parentPublication": { "id": "proceedings/gcis/2013/2885/0", "title": "2013 Fourth Global Congress on Intelligent Systems (GCIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504716", "title": "Monochrome glove: A robust real-time hand gesture recognition method by using a fabric glove with design of structured markers", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504716/12OmNz2kqqa", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2017/6327/0/6327a115", "title": "[POSTER] ChiroChroma: An Augmented Reality Game for the Assessment of Hand Motor Functionality", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a115/12OmNzcxZ6D", "parentPublication": { "id": "proceedings/ismar-adjunct/2017/6327/0", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itng/2010/3984/0/3984a398", "title": "Interactive Manipulation of Augmented Objects in Marker-Less AR Using Vision-Based Hand Interaction", 
"doi": null, "abstractUrl": "/proceedings-article/itng/2010/3984a398/12OmNznkJTM", "parentPublication": { "id": "proceedings/itng/2010/3984/0", "title": "Information Technology: New Generations, Third International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a178", "title": "NailRing: An Intelligent Ring for Recognizing Micro-gestures in Mixed Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a178/1JrQTEHcxXy", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzWfp8s", "title": "2017 19th Symposium on Virtual and Augmented Reality (SVR)", "acronym": "svr", "groupId": "1800426", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNxzMnKG", "doi": "10.1109/SVR.2017.9", "title": "Gesture-Based Manipulation of Virtual Terrains on an Augmented Reality Environment", "normalizedTitle": "Gesture-Based Manipulation of Virtual Terrains on an Augmented Reality Environment", "abstract": "This paper presents a free hand interface for modeling virtual terrains in an augmented reality environment. A secondary contribution of this work is to present a study of a set of suitable gestures that allows the manipulation of virtual terrains interactively, using augmented reality markers and the Leap Motion Controller. To demonstrate the study, an application was developed to allow the user to interact with the virtual terrain directly with his/her bare hands. Virtual objects were augmented using fiducial markers and the detection is done through the AVRLib Library. In our test layout, both markers and controller are co-planar. The application was tested and evaluated by six subjects. None of the subjects had previous knowledge on how to use the Leap Motion controller or the nature of the application.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents a free hand interface for modeling virtual terrains in an augmented reality environment. A secondary contribution of this work is to present a study of a set of suitable gestures that allows the manipulation of virtual terrains interactively, using augmented reality markers and the Leap Motion Controller. To demonstrate the study, an application was developed to allow the user to interact with the virtual terrain directly with his/her bare hands. Virtual objects were augmented using fiducial markers and the detection is done through the AVRLib Library. 
In our test layout, both markers and controller are co-planar. The application was tested and evaluated by six subjects. None of the subjects had previous knowledge on how to use the Leap Motion controller or the nature of the application.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents a free hand interface for modeling virtual terrains in an augmented reality environment. A secondary contribution of this work is to present a study of a set of suitable gestures that allows the manipulation of virtual terrains interactively, using augmented reality markers and the Leap Motion Controller. To demonstrate the study, an application was developed to allow the user to interact with the virtual terrain directly with his/her bare hands. Virtual objects were augmented using fiducial markers and the detection is done through the AVRLib Library. In our test layout, both markers and controller are co-planar. The application was tested and evaluated by six subjects. 
None of the subjects had previous knowledge on how to use the Leap Motion controller or the nature of the application.", "fno": "3588a001", "keywords": [ "Augmented Reality", "Gesture Recognition", "Human Computer Interaction", "Motion Control", "Solid Modelling", "Augmented Reality Environment", "Free Hand Interface", "Augmented Reality Markers", "Leap Motion Controller", "Virtual Objects", "Leap Motion Controller", "Gesture Based Manipulation", "Virtual Terrain Modelling", "Fiducial Markers", "AVR Lib Library", "Test Layout", "User Interface", "Augmented Reality", "Three Dimensional Displays", "Libraries", "Performance Evaluation", "Layout", "Gesture Recognition" ], "authors": [ { "affiliation": null, "fullName": "Allan Amaral Ribeiro", "givenName": "Allan Amaral", "surname": "Ribeiro", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Douglas Coelho Braga de Oliveira", "givenName": "Douglas Coelho Braga de", "surname": "Oliveira", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Rodrigo Luis de Souza da Silva", "givenName": "Rodrigo Luis de Souza da", "surname": "Silva", "__typename": "ArticleAuthorType" } ], "idPrefix": "svr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-11-01T00:00:00", "pubType": "proceedings", "pages": "1-7", "year": "2017", "issn": null, "isbn": "978-1-5386-3588-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3588ztoc", "articleId": "12OmNwDj1d3", "__typename": "AdjacentArticleType" }, "next": { "fno": "3588a008", "articleId": "12OmNAZx8Sa", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/svr/2013/5001/0/06655769", "title": "A Markeless Augmented Reality Tracking for Enhancing the User Interaction during Virtual Rehabilitation", "doi": null, "abstractUrl": 
"/proceedings-article/svr/2013/06655769/12OmNARRYqU", "parentPublication": { "id": "proceedings/svr/2013/5001/0", "title": "2013 XV Symposium on Virtual and Augmented Reality (SVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icsc/2016/0662/0/0662a358", "title": "Mobile Augmented Reality Authoring Tool", "doi": null, "abstractUrl": "/proceedings-article/icsc/2016/0662a358/12OmNAXglVC", "parentPublication": { "id": "proceedings/icsc/2016/0662/0", "title": "2016 IEEE Tenth International Conference on Semantic Computing (ICSC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2005/8929/0/01492784", "title": "Dynamic Texturing of Real Objects in an Augmented Reality System", "doi": null, "abstractUrl": "/proceedings-article/vr/2005/01492784/12OmNAnuTkI", "parentPublication": { "id": "proceedings/vr/2005/8929/0", "title": "IEEE Virtual Reality 2005", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/intetain/2015/0061/0/07325483", "title": "A preliminary study of a hybrid user interface for augmented reality applications", "doi": null, "abstractUrl": "/proceedings-article/intetain/2015/07325483/12OmNBtl1pV", "parentPublication": { "id": "proceedings/intetain/2015/0061/0", "title": "2015 7th International Conference on Intelligent Technologies for Interactive Entertainment (INTETAIN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2005/8929/0/01492781", "title": "Dynamic texturing of real objects in an augmented reality system", "doi": null, "abstractUrl": "/proceedings-article/vr/2005/01492781/12OmNwkhTh2", "parentPublication": { "id": "proceedings/vr/2005/8929/0", "title": "IEEE Virtual Reality 2005", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2016/3740/0/07836466", "title": "Mobile 
Augmented Reality Based on Invisible Marker", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836466/12OmNx7G5Tm", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2015/7962/0/7962a265", "title": "Augmented Tattoo: Evaluation of an Augmented Reality System for Tattoo Visualization", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2015/7962a265/12OmNxE2mGy", "parentPublication": { "id": "proceedings/sibgrapi/2015/7962/0", "title": "2015 28th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-amh/2009/5508/0/05336728", "title": "Augmented Reality (AR) joiners, a novel expanded cinematic form", "doi": null, "abstractUrl": "/proceedings-article/ismar-amh/2009/05336728/12OmNxRnvVT", "parentPublication": { "id": "proceedings/ismar-amh/2009/5508/0", "title": "2009 IEEE International Symposium on Mixed and Augmented Reality - Arts, Media and Humanities", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2013/01/mcg2013010012", "title": "Magic Cards: A New Augmented-Reality Approach", "doi": null, "abstractUrl": "/magazine/cg/2013/01/mcg2013010012/13rRUxBa5hz", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09199555", "title": "Understanding Multimodal User Gesture and Speech Behavior for Object Manipulation in Augmented Reality Using Elicitation", "doi": null, "abstractUrl": "/journal/tg/2020/12/09199555/1ncgzvoHSBG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNs0kyqL", "title": "Information, Intelligence, and Systems, International Conference on", "acronym": "iciis", "groupId": "1002374", "volume": "0", "displayVolume": "0", "year": "1999", "__typename": "ProceedingType" }, "article": { "id": "12OmNzuZUnH", "doi": "10.1109/ICIIS.1999.810285", "title": "Toward Multimodal Interpretation in a Natural Speech/Gesture Interface", "normalizedTitle": "Toward Multimodal Interpretation in a Natural Speech/Gesture Interface", "abstract": "Hand gestures and speech comprise the most important modalities of human to human interaction. Motivated by this, there has been a considerable interest in incorporating these modalities for \"natural\" human-computer interaction (HCI) particularly within virtual environments. An important feature of such a natural interface would be an absence of predefined speech and gesture commands. The resulting bimodal speech/gesture HCI \"language\" would thus have to be interpreted by the computer. This involves challenge ranging from the low-level signal processing of bimodal (audio/video) input to the high level interpretation of natural speech/gesture in HCI. This paper identifies the issues of natural (non-prefixed) multimodal HCI interpretation. Since, in the natural interaction, gestures do not exhibit one-to-one mapping of their form to meaning, we specifically address problems associated with vision-based gesture interpretation in a multimodal interface. We consider the design of a speech/gesture interface in the context of a set of spatial tasks defined on a computerized campus map. The task context makes it possible to study the critical components of the multimodal interpretation and integration problem.", "abstracts": [ { "abstractType": "Regular", "content": "Hand gestures and speech comprise the most important modalities of human to human interaction. 
Motivated by this, there has been a considerable interest in incorporating these modalities for \"natural\" human-computer interaction (HCI) particularly within virtual environments. An important feature of such a natural interface would be an absence of predefined speech and gesture commands. The resulting bimodal speech/gesture HCI \"language\" would thus have to be interpreted by the computer. This involves challenge ranging from the low-level signal processing of bimodal (audio/video) input to the high level interpretation of natural speech/gesture in HCI. This paper identifies the issues of natural (non-prefixed) multimodal HCI interpretation. Since, in the natural interaction, gestures do not exhibit one-to-one mapping of their form to meaning, we specifically address problems associated with vision-based gesture interpretation in a multimodal interface. We consider the design of a speech/gesture interface in the context of a set of spatial tasks defined on a computerized campus map. The task context makes it possible to study the critical components of the multimodal interpretation and integration problem.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Hand gestures and speech comprise the most important modalities of human to human interaction. Motivated by this, there has been a considerable interest in incorporating these modalities for \"natural\" human-computer interaction (HCI) particularly within virtual environments. An important feature of such a natural interface would be an absence of predefined speech and gesture commands. The resulting bimodal speech/gesture HCI \"language\" would thus have to be interpreted by the computer. This involves challenge ranging from the low-level signal processing of bimodal (audio/video) input to the high level interpretation of natural speech/gesture in HCI. This paper identifies the issues of natural (non-prefixed) multimodal HCI interpretation. 
Since, in the natural interaction, gestures do not exhibit one-to-one mapping of their form to meaning, we specifically address problems associated with vision-based gesture interpretation in a multimodal interface. We consider the design of a speech/gesture interface in the context of a set of spatial tasks defined on a computerized campus map. The task context makes it possible to study the critical components of the multimodal interpretation and integration problem.", "fno": "04460328", "keywords": [ "Gesture Recognition", "Multimodal Interface", "Gesture Interpretation", "Modality Integration" ], "authors": [ { "affiliation": "Pennsylvania State University", "fullName": "Sanshzar Kettebekov", "givenName": "Sanshzar", "surname": "Kettebekov", "__typename": "ArticleAuthorType" }, { "affiliation": "Pennsylvania State University", "fullName": "Rajeev Sharma", "givenName": "Rajeev", "surname": "Sharma", "__typename": "ArticleAuthorType" } ], "idPrefix": "iciis", "isOpenAccess": false, "showRecommendedArticles": false, "showBuyMe": true, "hasPdf": true, "pubDate": "1999-03-01T00:00:00", "pubType": "proceedings", "pages": "328", "year": "1999", "issn": null, "isbn": "0-7695-0446-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "04460324", "articleId": "12OmNwErpzR", "__typename": "AdjacentArticleType" }, "next": { "fno": "04460336", "articleId": "12OmNs0TKSZ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "1pBMeBWXAZ2", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "1810084", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1pBMk3pKsEw", "doi": "10.1109/ISMAR-Adjunct51615.2020.00052", "title": "Comparing Single-modal and Multimodal Interaction in an Augmented Reality System", "normalizedTitle": "Comparing Single-modal and Multimodal Interaction in an Augmented Reality System", "abstract": "Multimodal interaction is expected to offer better user experience in Augmented Reality (AR), and thus becomes a recent research focus. However, due to the lack of hardware-level support, most existing works only combine two modalities at a time, e.g., gesture and speech. Gaze-based interaction techniques have been explored for the screen-based application, but rarely been used in AR systemsy configurable augmented reality system. In this paper, we propose a multimodal interactive system that integrates gaze, gesture and speech in a flexibly configurable augmented reality system. Our lightweight head-mounted device supports accurate gaze tracking, hand gesture recognition and speech recognition simultaneously. More importantly, the system can be easily configured into different modality combinations to study the effects of different interaction techniques. We evaluated the system in the table lamps scenario, and compared the performance of different interaction techniques. The experimental results show that the Gaze+Gesture+Speech is superior in terms of performance.", "abstracts": [ { "abstractType": "Regular", "content": "Multimodal interaction is expected to offer better user experience in Augmented Reality (AR), and thus becomes a recent research focus. However, due to the lack of hardware-level support, most existing works only combine two modalities at a time, e.g., gesture and speech. 
Gaze-based interaction techniques have been explored for the screen-based application, but rarely been used in AR systems. In this paper, we propose a multimodal interactive system that integrates gaze, gesture and speech in a flexibly configurable augmented reality system. Our lightweight head-mounted device supports accurate gaze tracking, hand gesture recognition and speech recognition simultaneously. More importantly, the system can be easily configured into different modality combinations to study the effects of different interaction techniques. We evaluated the system in the table lamps scenario, and compared the performance of different interaction techniques. The experimental results show that the Gaze+Gesture+Speech is superior in terms of performance.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Multimodal interaction is expected to offer better user experience in Augmented Reality (AR), and thus becomes a recent research focus. However, due to the lack of hardware-level support, most existing works only combine two modalities at a time, e.g., gesture and speech. Gaze-based interaction techniques have been explored for the screen-based application, but rarely been used in AR systems. In this paper, we propose a multimodal interactive system that integrates gaze, gesture and speech in a flexibly configurable augmented reality system. Our lightweight head-mounted device supports accurate gaze tracking, hand gesture recognition and speech recognition simultaneously. More importantly, the system can be easily configured into different modality combinations to study the effects of different interaction techniques. We evaluated the system in the table lamps scenario, and compared the performance of different interaction techniques. 
The experimental results show that the Gaze+Gesture+Speech is superior in terms of performance.", "fno": "767500a165", "keywords": [ "Augmented Reality", "Gesture Recognition", "Helmet Mounted Displays", "Human Computer Interaction", "Interactive Systems", "Object Tracking", "Speech Recognition", "Multimodal Interaction", "User Experience", "Hardware Level Support", "Gaze Based Interaction Techniques", "Screen Based Application", "AR Systems", "Multimodal Interactive System", "Flexibly Configurable Augmented Reality System", "Lightweight Head Mounted Device", "Gaze Tracking", "Hand Gesture Recognition", "Speech Recognition", "Modality Combination", "Single Modal Interaction", "Performance Evaluation", "Design Methodology", "Interactive Systems", "Speech Recognition", "Gesture Recognition", "User Experience", "Augmented Reality", "Multimodal Interaction", "Augmented Reality", "Gaze", "Gesture", "Speech", "AR System" ], "authors": [ { "affiliation": "Beihang University,State Key Laboratory of VR Technology and Systems, School of CSE", "fullName": "Zhimin Wang", "givenName": "Zhimin", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "Beihang University,State Key Laboratory of VR Technology and Systems, School of CSE", "fullName": "Huangyue Yu", "givenName": "Huangyue", "surname": "Yu", "__typename": "ArticleAuthorType" }, { "affiliation": "Peng Cheng Laboratory,Shenzhen,China", "fullName": "Haofei Wang", "givenName": "Haofei", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "Beihang University,State Key Laboratory of VR Technology and Systems, School of CSE", "fullName": "Zongji Wang", "givenName": "Zongji", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "Beihang University,State Key Laboratory of VR Technology and Systems, School of CSE", "fullName": "Feng Lu", "givenName": "Feng", "surname": "Lu", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, 
"showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-11-01T00:00:00", "pubType": "proceedings", "pages": "165-166", "year": "2020", "issn": null, "isbn": "978-1-7281-7675-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "767500a158", "articleId": "1pBMfJqqtQ4", "__typename": "AdjacentArticleType" }, "next": { "fno": "767500a167", "articleId": "1pBMfN4nbnq", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icisa/2011/9222/0/05772406", "title": "Design and Implementation of an Augmented Reality System Using Gaze Interaction", "doi": null, "abstractUrl": "/proceedings-article/icisa/2011/05772406/12OmNAWpyrv", "parentPublication": { "id": "proceedings/icisa/2011/9222/0", "title": "2011 International Conference on Information Science and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/intetain/2015/0061/0/07325483", "title": "A preliminary study of a hybrid user interface for augmented reality applications", "doi": null, "abstractUrl": "/proceedings-article/intetain/2015/07325483/12OmNBtl1pV", "parentPublication": { "id": "proceedings/intetain/2015/0061/0", "title": "2015 7th International Conference on Intelligent Technologies for Interactive Entertainment (INTETAIN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pov/2011/035/0/05712368", "title": "Augmented reality for immersive remote collaboration", "doi": null, "abstractUrl": "/proceedings-article/pov/2011/05712368/12OmNqJHFHw", "parentPublication": { "id": "proceedings/pov/2011/035/0", "title": "2011 Workshop on Person-Oriented Vision (POV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2018/2335/0/233501a517", "title": "An Immersive System with 
Multi-Modal Human-Computer Interaction", "doi": null, "abstractUrl": "/proceedings-article/fg/2018/233501a517/12OmNxwncbv", "parentPublication": { "id": "proceedings/fg/2018/2335/0", "title": "2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/intetain/2015/0061/0/07325481", "title": "ARZombie: A mobile augmented reality game with multimodal interaction", "doi": null, "abstractUrl": "/proceedings-article/intetain/2015/07325481/12OmNzV70qo", "parentPublication": { "id": "proceedings/intetain/2015/0061/0", "title": "2015 7th International Conference on Intelligent Technologies for Interactive Entertainment (INTETAIN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciis/1999/0446/0/04460328", "title": "Toward Multimodal Interpretation in a Natural Speech/Gesture Interface", "doi": null, "abstractUrl": "/proceedings-article/iciis/1999/04460328/12OmNzuZUnH", "parentPublication": { "id": "proceedings/iciis/1999/0446/0", "title": "Information, Intelligence, and Systems, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2014/01/mcg2014010077", "title": "Hands in Space: Gesture Interaction with Augmented-Reality Interfaces", "doi": null, "abstractUrl": "/magazine/cg/2014/01/mcg2014010077/13rRUwcS1uZ", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2006/03/mcg2006030062", "title": "Multimodal Interaction with a Wearable Augmented Reality System", "doi": null, "abstractUrl": "/magazine/cg/2006/03/mcg2006030062/13rRUxN5evB", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/vs-games/2019/4540/0/08864548", "title": "The Usability of the Microsoft HoloLens for an Augmented Reality Game to Teach Elementary School Children", "doi": null, "abstractUrl": "/proceedings-article/vs-games/2019/08864548/1e5ZpUVkjVS", "parentPublication": { "id": "proceedings/vs-games/2019/4540/0", "title": "2019 11th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09199555", "title": "Understanding Multimodal User Gesture and Speech Behavior for Object Manipulation in Augmented Reality Using Elicitation", "doi": null, "abstractUrl": "/journal/tg/2020/12/09199555/1ncgzvoHSBG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1wKCdeg89vq", "title": "2022 IEEE Symposium on Security and Privacy (SP)", "acronym": "sp", "groupId": "1000646", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1FlQAqZC968", "doi": "10.1109/SP46214.2022.9833718", "title": "Invisible Finger: Practical Electromagnetic Interference Attack on Touchscreen-based Electronic Devices", "normalizedTitle": "Invisible Finger: Practical Electromagnetic Interference Attack on Touchscreen-based Electronic Devices", "abstract": "Touchscreen-based electronic devices such as smart phones and smart tablets are widely used in our daily life. While the security of electronic devices have been heavily investigated recently, the resilience of touchscreens against various attacks has yet to be thoroughly investigated. In this paper, for the first time, we show that touchscreen-based electronic devices are vulnerable to intentional electromagnetic interference (IEMI) attacks in a systematic way and how to conduct this attack in a practical way. Our contribution lies in not just demonstrating the attack, but also analyzing and quantifying the underlying mechanism allowing the novel IEMI attack on touchscreens in detail. We show how to calculate both the minimum amount of electric field and signal frequency required to induce touchscreen ghost touches. We further analyze our IEMI attack on real touchscreens with different magnitudes, frequencies, duration, and multitouch patterns. The mechanism of controlling the touchscreen-enabled electronic devices with IEMI signals is also elaborated. We design and evaluate an out-of-sight touchscreen locator and touch injection feedback mechanism to assist a practical IEMI attack. Our attack works directly on the touchscreen circuit regardless of the touchscreen scanning mechanism or operating system. 
Our attack can inject short-tap, long-press, and omnidirectional gestures on touchscreens from a distance larger than the average thickness of common tabletops. Compared with the state-of-the-art touchscreen attack, ours can accurately inject different types of touch events without the need for sensing signal synchronization, which makes our attack more robust and practical. In addition, rather than showing a simple proof-of-concept attack, we present and demonstrate the first ready-to-use IEMI based touchscreen attack vector with end-to-end attack scenarios", "abstracts": [ { "abstractType": "Regular", "content": "Touchscreen-based electronic devices such as smart phones and smart tablets are widely used in our daily life. While the security of electronic devices have been heavily investigated recently, the resilience of touchscreens against various attacks has yet to be thoroughly investigated. In this paper, for the first time, we show that touchscreen-based electronic devices are vulnerable to intentional electromagnetic interference (IEMI) attacks in a systematic way and how to conduct this attack in a practical way. Our contribution lies in not just demonstrating the attack, but also analyzing and quantifying the underlying mechanism allowing the novel IEMI attack on touchscreens in detail. We show how to calculate both the minimum amount of electric field and signal frequency required to induce touchscreen ghost touches. We further analyze our IEMI attack on real touchscreens with different magnitudes, frequencies, duration, and multitouch patterns. The mechanism of controlling the touchscreen-enabled electronic devices with IEMI signals is also elaborated. We design and evaluate an out-of-sight touchscreen locator and touch injection feedback mechanism to assist a practical IEMI attack. Our attack works directly on the touchscreen circuit regardless of the touchscreen scanning mechanism or operating system. 
Our attack can inject short-tap, long-press, and omnidirectional gestures on touchscreens from a distance larger than the average thickness of common tabletops. Compared with the state-of-the-art touchscreen attack, ours can accurately inject different types of touch events without the need for sensing signal synchronization, which makes our attack more robust and practical. In addition, rather than showing a simple proof-of-concept attack, we present and demonstrate the first ready-to-use IEMI based touchscreen attack vector with end-to-end attack scenarios", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Touchscreen-based electronic devices such as smart phones and smart tablets are widely used in our daily life. While the security of electronic devices have been heavily investigated recently, the resilience of touchscreens against various attacks has yet to be thoroughly investigated. In this paper, for the first time, we show that touchscreen-based electronic devices are vulnerable to intentional electromagnetic interference (IEMI) attacks in a systematic way and how to conduct this attack in a practical way. Our contribution lies in not just demonstrating the attack, but also analyzing and quantifying the underlying mechanism allowing the novel IEMI attack on touchscreens in detail. We show how to calculate both the minimum amount of electric field and signal frequency required to induce touchscreen ghost touches. We further analyze our IEMI attack on real touchscreens with different magnitudes, frequencies, duration, and multitouch patterns. The mechanism of controlling the touchscreen-enabled electronic devices with IEMI signals is also elaborated. We design and evaluate an out-of-sight touchscreen locator and touch injection feedback mechanism to assist a practical IEMI attack. Our attack works directly on the touchscreen circuit regardless of the touchscreen scanning mechanism or operating system. 
Our attack can inject short-tap, long-press, and omnidirectional gestures on touchscreens from a distance larger than the average thickness of common tabletops. Compared with the state-of-the-art touchscreen attack, ours can accurately inject different types of touch events without the need for sensing signal synchronization, which makes our attack more robust and practical. In addition, rather than showing a simple proof-of-concept attack, we present and demonstrate the first ready-to-use IEMI based touchscreen attack vector with end-to-end attack scenarios", "fno": "131600b548", "keywords": [ "Privacy", "Systematics", "Operating Systems", "Electromagnetic Interference", "Touch Sensitive Screens", "Sensors", "Security" ], "authors": [ { "affiliation": "University of Florida", "fullName": "Haoqi Shan", "givenName": "Haoqi", "surname": "Shan", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Florida", "fullName": "Boyi Zhang", "givenName": "Boyi", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Florida", "fullName": "Zihao Zhan", "givenName": "Zihao", "surname": "Zhan", "__typename": "ArticleAuthorType" }, { "affiliation": "University of New Hampshire", "fullName": "Dean Sullivan", "givenName": "Dean", "surname": "Sullivan", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Florida", "fullName": "Shuo Wang", "givenName": "Shuo", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Florida", "fullName": "Yier Jin", "givenName": "Yier", "surname": "Jin", "__typename": "ArticleAuthorType" } ], "idPrefix": "sp", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-05-01T00:00:00", "pubType": "proceedings", "pages": "1246-1262", "year": "2022", "issn": null, "isbn": "978-1-6654-1316-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { 
"fno": "131600a808", "articleId": "1FlQvuYBVTO", "__typename": "AdjacentArticleType" }, "next": { "fno": "131600a019", "articleId": "1FlQG3ihxEk", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2018/3365/0/08446457", "title": "Memory Task Performance Across Augmented and Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446457/13bd1fph1yg", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/pc/2016/04/mpc2016040020", "title": "ForcePhone: Software Lets Smartphones Sense Touch Force", "doi": null, "abstractUrl": "/magazine/pc/2016/04/mpc2016040020/13rRUwcS1w6", "parentPublication": { "id": "mags/pc", "title": "IEEE Pervasive Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/pc/2018/01/mpc2018010015", "title": "Ability-Based Optimization of Touchscreen Interactions", "doi": null, "abstractUrl": "/magazine/pc/2018/01/mpc2018010015/13rRUxBa5ur", "parentPublication": { "id": "mags/pc", "title": "IEEE Pervasive Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sp/2022/1316/0/131600b537", "title": "WIGHT: Wired Ghost Touch Attack on Capacitive Touchscreens", "doi": null, "abstractUrl": "/proceedings-article/sp/2022/131600b537/1FlQCm4Upqg", "parentPublication": { "id": "proceedings/sp/2022/1316/0/", "title": "2022 IEEE Symposium on Security and Privacy (SP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/5555/01/09920171", "title": "Touchscreens Can Reveal User Identity: Capacitive Plethysmogram-Based Biometrics", "doi": null, "abstractUrl": "/journal/tm/5555/01/09920171/1HxSjl47M0o", "parentPublication": { "id": 
"trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a702", "title": "Personalization of a Mid-Air Gesture Keyboard using Multi-Objective Bayesian Optimization", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a702/1JrQW09ujvi", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/2021/02/08869844", "title": "An Indirect Eavesdropping Attack of Keystrokes on Touch Screen through Acoustic Sensing", "doi": null, "abstractUrl": "/journal/tm/2021/02/08869844/1e9h1DcgdQk", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09212653", "title": "Breaking the Screen: Interaction Across Touchscreen Boundaries in Virtual Reality for Mobile Knowledge Workers", "doi": null, "abstractUrl": "/journal/tg/2020/12/09212653/1nG96pJ3dKg", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/2022/10/09347817", "title": "Enabling Finger-Touch-Based Mobile User Authentication via Physical Vibrations on IoT Devices", "doi": null, "abstractUrl": "/journal/tm/2022/10/09347817/1qWIn2ul5de", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2020/9134/0/913400a544", "title": "Interactive Image Exploration for Visually Impaired Readers using Audio-augmented Touch Gestures", "doi": null, "abstractUrl": 
"/proceedings-article/iv/2020/913400a544/1rSRe7XlXwI", "parentPublication": { "id": "proceedings/iv/2020/9134/0", "title": "2020 24th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1KExIO2iqw8", "title": "2022 International Conference on Computers, Information Processing and Advanced Education (CIPAE)", "acronym": "cipae", "groupId": "1840724", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1KExKVfevh6", "doi": "10.1109/CIPAE55637.2022.00076", "title": "Research on the application of online teaching information intelligent interaction system based on virtual reality technology", "normalizedTitle": "Research on the application of online teaching information intelligent interaction system based on virtual reality technology", "abstract": "Based on the virtual reality (VR) and augmented reality (AR) technologies, the virtual reality interactive model platform for online teaching of engineering graphics is developed using Unity3D development platform, 3ds MAX, UV modeling software and C# interactive programming tools. The platform's underlying design, functional and technical development scheme, content design, and management strategy of the platform are introduced in detail. The platform can give full play to the respective advantages of VR and AR technologies, which can help overcome the obstacles in model display and interaction in online teaching of engineering drawing courses and provide a useful trial and exploration for the information construction of engineering drawing courses.", "abstracts": [ { "abstractType": "Regular", "content": "Based on the virtual reality (VR) and augmented reality (AR) technologies, the virtual reality interactive model platform for online teaching of engineering graphics is developed using Unity3D development platform, 3ds MAX, UV modeling software and C# interactive programming tools. The platform's underlying design, functional and technical development scheme, content design, and management strategy of the platform are introduced in detail. 
The platform can give full play to the respective advantages of VR and AR technologies, which can help overcome the obstacles in model display and interaction in online teaching of engineering drawing courses and provide a useful trial and exploration for the information construction of engineering drawing courses.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Based on the virtual reality (VR) and augmented reality (AR) technologies, the virtual reality interactive model platform for online teaching of engineering graphics is developed using Unity3D development platform, 3ds MAX, UV modeling software and C# interactive programming tools. The platform's underlying design, functional and technical development scheme, content design, and management strategy of the platform are introduced in detail. The platform can give full play to the respective advantages of VR and AR technologies, which can help overcome the obstacles in model display and interaction in online teaching of engineering drawing courses and provide a useful trial and exploration for the information construction of engineering drawing courses.", "fno": "681200a337", "keywords": [ "Augmented Reality", "Engineering Graphics", "Interactive Systems", "Solid Modelling", "Teaching", "Virtual Reality", "3 Ds MAX", "C Interactive Programming Tools", "Content Design", "Engineering Drawing Courses", "Engineering Graphics", "Model Display", "Online Teaching Information Intelligent Interaction System", "Technical Development Scheme", "Unity 3 D Development Platform", "UV Modeling Software", "Virtual Reality Interactive Model Platform", "Virtual Reality Technology", "Solid Modeling", "Three Dimensional Displays", "Distance Learning", "Engineering Drawings", "Education", "Information Processing", "Touch Sensitive Screens", "Virtual Reality VR", "Augmented Reality AR", "Engineering Graphics", "Online Education" ], "authors": [ { "affiliation": "Chengdu Polytechnic,Chengdu,China", "fullName": "Xia 
Yang", "givenName": "Xia", "surname": "Yang", "__typename": "ArticleAuthorType" } ], "idPrefix": "cipae", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-08-01T00:00:00", "pubType": "proceedings", "pages": "337-340", "year": "2022", "issn": null, "isbn": "978-1-6654-6812-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "681200a333", "articleId": "1KExSNyp0Ag", "__typename": "AdjacentArticleType" }, "next": { "fno": "681200a341", "articleId": "1KExQChp8k0", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iv/1999/0210/0/02100032", "title": "Virtual Reality and Augmented Reality as a Training Tool for Assembly Tasks", "doi": null, "abstractUrl": "/proceedings-article/iv/1999/02100032/12OmNAObbyR", "parentPublication": { "id": "proceedings/iv/1999/0210/0", "title": "1999 IEEE International Conference on Information Visualization (Cat. No. 
PR00210)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446457", "title": "Memory Task Performance Across Augmented and Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446457/13bd1fph1yg", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699278", "title": "Hybrid UIs for Music Exploration in AR and VR", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699278/19F1NJTrBfi", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a962", "title": "Distant Hand Interaction Framework in Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a962/1CJe3HP2vx6", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/11/09874256", "title": "Efficient Flower Text Entry in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2022/11/09874256/1GjwONKhl84", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a702", "title": "Personalization of a Mid-Air Gesture Keyboard using Multi-Objective Bayesian Optimization", "doi": null, "abstractUrl": 
"/proceedings-article/ismar/2022/532500a702/1JrQW09ujvi", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090500", "title": "Towards an Immersive Guided Virtual Reality Microfabrication Laboratory Training System", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090500/1jIxsMpiX2o", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09212653", "title": "Breaking the Screen: Interaction Across Touchscreen Boundaries in Virtual Reality for Mobile Knowledge Workers", "doi": null, "abstractUrl": "/journal/tg/2020/12/09212653/1nG96pJ3dKg", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2020/7675/0/767500a203", "title": "Industrial Augmented Reality: 3D-Content Editor for Augmented Reality Maintenance Worker Support System", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a203/1pBMigKK7F6", "parentPublication": { "id": "proceedings/ismar-adjunct/2020/7675/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a492", "title": "Determining the Target Point of the Mid-Air Pinch Gesture", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a492/1tnXsQx2NOw", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on 
Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1jIx7fmpQ9a", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1jIxfvWzz6o", "doi": "10.1109/VR46266.2020.00037", "title": "Touch the Wall: Comparison of Virtual and Augmented Reality with Conventional 2D Screen Eye-Hand Coordination Training Systems", "normalizedTitle": "Touch the Wall: Comparison of Virtual and Augmented Reality with Conventional 2D Screen Eye-Hand Coordination Training Systems", "abstract": "Previous research on eye-hand coordination training systems has investigated user performance on a wall, 2D touchscreens, and in Virtual Reality (VR). In this paper, we designed an eye-hand coordination reaction test to investigate and compare user performance in three different virtual environments (VEs) – VR, Augmented Reality (AR), and a 2D touchscreen. VR and AR conditions also included two feedback conditions – mid-air and passive haptics. Results showed that compared to AR, participants were significantly faster and made fewer errors both in 2D and VR. However, compared to VR and AR, throughput performance of the participants was significantly higher in the 2D touchscreen condition. No significant differences were found between the two feedback conditions. The results show the importance of assessing precision and accuracy in eye-hand coordination training and suggest that it is currently not advisable to use AR headsets in such systems.", "abstracts": [ { "abstractType": "Regular", "content": "Previous research on eye-hand coordination training systems has investigated user performance on a wall, 2D touchscreens, and in Virtual Reality (VR). In this paper, we designed an eye-hand coordination reaction test to investigate and compare user performance in three different virtual environments (VEs) – VR, Augmented Reality (AR), and a 2D touchscreen. 
VR and AR conditions also included two feedback conditions – mid-air and passive haptics. Results showed that compared to AR, participants were significantly faster and made fewer errors both in 2D and VR. However, compared to VR and AR, throughput performance of the participants was significantly higher in the 2D touchscreen condition. No significant differences were found between the two feedback conditions. The results show the importance of assessing precision and accuracy in eye-hand coordination training and suggest that it is currently not advisable to use AR headsets in such systems.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Previous research on eye-hand coordination training systems has investigated user performance on a wall, 2D touchscreens, and in Virtual Reality (VR). In this paper, we designed an eye-hand coordination reaction test to investigate and compare user performance in three different virtual environments (VEs) – VR, Augmented Reality (AR), and a 2D touchscreen. VR and AR conditions also included two feedback conditions – mid-air and passive haptics. Results showed that compared to AR, participants were significantly faster and made fewer errors both in 2D and VR. However, compared to VR and AR, throughput performance of the participants was significantly higher in the 2D touchscreen condition. No significant differences were found between the two feedback conditions. 
The results show the importance of assessing precision and accuracy in eye-hand coordination training and suggest that it is currently not advisable to use AR headsets in such systems.", "fno": "09089504", "keywords": [ "Training", "Haptic Interfaces", "Task Analysis", "Two Dimensional Displays", "Throughput", "Mathematical Model", "Human Centered Computing", "Human Computer Interaction HCI", "Human Centered Computing", "Virtual Reality", "Human Centered Computing", "Pointing", "Human Centered Computing", "Touch Screens" ], "authors": [ { "affiliation": "Simon Fraser University,School of Interactive Arts and Technology (SIAT),Vancouver,Canada", "fullName": "Anil Ufuk Batmaz", "givenName": "Anil Ufuk", "surname": "Batmaz", "__typename": "ArticleAuthorType" }, { "affiliation": "Simon Fraser University,School of Interactive Arts and Technology (SIAT),Vancouver,Canada", "fullName": "Aunnoy K Mutasim", "givenName": "Aunnoy K", "surname": "Mutasim", "__typename": "ArticleAuthorType" }, { "affiliation": "Simon Fraser University,School of Interactive Arts and Technology (SIAT),Vancouver,Canada", "fullName": "Morteza Malekmakan", "givenName": "Morteza", "surname": "Malekmakan", "__typename": "ArticleAuthorType" }, { "affiliation": "Simon Fraser University,School of Interactive Arts and Technology (SIAT),Vancouver,Canada", "fullName": "Elham Sadr", "givenName": "Elham", "surname": "Sadr", "__typename": "ArticleAuthorType" }, { "affiliation": "Simon Fraser University,School of Interactive Arts and Technology (SIAT),Vancouver,Canada", "fullName": "Wolfgang Stuerzlinger", "givenName": "Wolfgang", "surname": "Stuerzlinger", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-03-01T00:00:00", "pubType": "proceedings", "pages": "184-193", "year": "2020", "issn": null, "isbn": "978-1-7281-5608-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, 
"webExtras": [], "adjacentArticles": { "previous": { "fno": "09089587", "articleId": "1jIx86HRTKU", "__typename": "AdjacentArticleType" }, "next": { "fno": "09089476", "articleId": "1jIxaFnm0GQ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2017/6647/0/07892247", "title": "MagicToon: A 2D-to-3D creative cartoon modeling system with mobile AR", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892247/12OmNxjjEhC", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2013/4795/0/06549376", "title": "Touch experience in augmented reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2013/06549376/12OmNy2agRt", "parentPublication": { "id": "proceedings/vr/2013/4795/0", "title": "2013 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446053", "title": "High-Fidelity Interaction for Virtual and Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446053/13bd1tl2omt", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/mu/2019/01/08552403", "title": "Edge Caching and Computing in 5G for Mobile AR/VR and Tactile Internet", "doi": null, "abstractUrl": "/magazine/mu/2019/01/08552403/17D45VVho1X", "parentPublication": { "id": "mags/mu", "title": "IEEE MultiMedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10049755", "title": "Leveling the Playing Field: A Comparative Reevaluation of Unmodified Eye Tracking as an Input and Interaction Modality for VR", "doi": null, 
"abstractUrl": "/journal/tg/2023/05/10049755/1KYoozDk3v2", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2019/5604/0/560400a066", "title": "Using Eye Tracked Virtual Reality to Classify Understanding of Vocabulary in Recall Tasks", "doi": null, "abstractUrl": "/proceedings-article/aivr/2019/560400a066/1grOknc7EUE", "parentPublication": { "id": "proceedings/aivr/2019/5604/0", "title": "2019 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089598", "title": "Implementation and Evaluation of Touch-based Interaction Using Electrovibration Haptic Feedback in Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089598/1jIxb4ZNizS", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09212653", "title": "Breaking the Screen: Interaction Across Touchscreen Boundaries in Virtual Reality for Mobile Knowledge Workers", "doi": null, "abstractUrl": "/journal/tg/2020/12/09212653/1nG96pJ3dKg", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciev-&-icivpr/2020/9331/0/09306576", "title": "Keynote Talk 2: Eye Movement Detection Sensors, Biometrics, and Health Assessment", "doi": null, "abstractUrl": "/proceedings-article/iciev-&-icivpr/2020/09306576/1qcifkaN86k", "parentPublication": { "id": "proceedings/iciev-&-icivpr/2020/9331/0", "title": "2020 Joint 9th International Conference on 
Informatics, Electronics & Vision (ICIEV) and 2020 4th International Conference on Imaging, Vision & Pattern Recognition (icIVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800a367", "title": "TEyeD: Over 20 Million Real-World Eye Images with Pupil, Eyelid, and Iris 2D and 3D Segmentations, 2D and 3D Landmarks, 3D Eyeball, Gaze Vector, and Eye Movement Types", "doi": null, "abstractUrl": "/proceedings-article/ismar/2021/015800a367/1yeD3XlUpBS", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1jIxhEnA8IE", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "acronym": "vrw", "groupId": "1836626", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1jIxsMpiX2o", "doi": "10.1109/VRW50115.2020.00249", "title": "Towards an Immersive Guided Virtual Reality Microfabrication Laboratory Training System", "normalizedTitle": "Towards an Immersive Guided Virtual Reality Microfabrication Laboratory Training System", "abstract": "In this paper, we present a 3D virtual reality-based interactive laboratory training system that provided training on how to operate a variety of machines in a microfabrication lab environment. The training system focused on providing fully immersive guided learning features that helped users to learn the lab operations independently. The system consisted of a hint system that automatically highlights lab tools, VR hand controller assistance, and an auto scoring system. Ten participants were tested using the system. Preliminary results showed clear improvement in learning speed, independent learning ability, and error reductions during this immersive guided VR learning environment.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we present a 3D virtual reality-based interactive laboratory training system that provided training on how to operate a variety of machines in a microfabrication lab environment. The training system focused on providing fully immersive guided learning features that helped users to learn the lab operations independently. The system consisted of a hint system that automatically highlights lab tools, VR hand controller assistance, and an auto scoring system. Ten participants were tested using the system. 
Preliminary results showed clear improvement in learning speed, independent learning ability, and error reductions during this immersive guided VR learning environment.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we present a 3D virtual reality-based interactive laboratory training system that provided training on how to operate a variety of machines in a microfabrication lab environment. The training system focused on providing fully immersive guided learning features that helped users to learn the lab operations independently. The system consisted of a hint system that automatically highlights lab tools, VR hand controller assistance, and an auto scoring system. Ten participants were tested using the system. Preliminary results showed clear improvement in learning speed, independent learning ability, and error reductions during this immersive guided VR learning environment.", "fno": "09090500", "keywords": [ "Computer Based Training", "Virtual Reality", "Auto Scoring System", "Independent Learning", "Immersive Guided VR Learning Environment", "Immersive Guided Virtual Reality Microfabrication Laboratory Training System", "3 D Virtual Reality Based Interactive Laboratory Training System", "Microfabrication Lab Environment", "Fully Immersive Guided Learning Features", "VR Hand Controller Assistance", "Training", "Microfabrication", "Task Analysis", "Virtual Reality", "Three Dimensional Displays", "Touch Sensitive Screens", "Lithography", "Laboratory", "Training", "Virtual Reality", "Microfabrication" ], "authors": [ { "affiliation": "University of Missouri,Electrical Engineering and Computer Science Department", "fullName": "Fang Wang", "givenName": "Fang", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Missouri,School of Information Science and Learning Technologies", "fullName": "Xinhao Xu", "givenName": "Xinhao", "surname": "Xu", "__typename": "ArticleAuthorType" }, { "affiliation": 
"University of Missouri,Electrical Engineering and Computer Science Department", "fullName": "Weiyu Feng", "givenName": "Weiyu", "surname": "Feng", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Missouri,School of Information Science and Learning Technologies", "fullName": "Jhon Alexander Bueno-Vesga", "givenName": "Jhon Alexander", "surname": "Bueno-Vesga", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Missouri,Electrical Engineering and Computer Science Department", "fullName": "Zheng Liang", "givenName": "Zheng", "surname": "Liang", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Missouri,Electrical Engineering and Computer Science Department", "fullName": "Scottie Murrell", "givenName": "Scottie", "surname": "Murrell", "__typename": "ArticleAuthorType" } ], "idPrefix": "vrw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-03-01T00:00:00", "pubType": "proceedings", "pages": "796-797", "year": "2020", "issn": null, "isbn": "978-1-7281-6532-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09090607", "articleId": "1jIxAfKUpbO", "__typename": "AdjacentArticleType" }, "next": { "fno": "09090602", "articleId": "1jIxrupgg92", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "mags/cg/2018/02/mcg2018020057", "title": "An Analysis of VR Technology Used in Immersive Simulations with a Serious Game Perspective", "doi": null, "abstractUrl": "/magazine/cg/2018/02/mcg2018020057/13rRUwh80Nv", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a804", "title": "Assist Home Training Table Tennis Skill Acquisition via Immersive Learning and Web 
Technologies", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a804/1CJd0JOwO9a", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/11/09874256", "title": "Efficient Flower Text Entry in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2022/11/09874256/1GjwONKhl84", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798126", "title": "Interaction Design for Selection and Manipulation on Immersive Touch Table Display Systems for 3D Geographic Visualization", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798126/1cJ167T0YHm", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/09044162", "title": "Immersive Training: Outcomes from Small Scale AR/VR Pilot-Studies", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/09044162/1ivu6SNCXLi", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09212653", "title": "Breaking the Screen: Interaction Across Touchscreen Boundaries in Virtual Reality for Mobile Knowledge Workers", "doi": null, "abstractUrl": "/journal/tg/2020/12/09212653/1nG96pJ3dKg", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2020/7675/0/767500a078", "title": "Modeling Emotions for Training in Immersive Simulations (METIS): A Cross-Platform Virtual Classroom Study", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a078/1pBMeXqNvhK", "parentPublication": { "id": "proceedings/ismar-adjunct/2020/7675/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a633", "title": "Immersive Authoring of Virtual Reality Training", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a633/1tnXNG6t1x6", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a589", "title": "Simulation and Assessment of Safety Procedure in an Immersive Virtual Reality (IVR) Laboratory", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a589/1tnXRaYRcdi", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2020/0497/0/049700a253", "title": "Multi-touch Simulation System for Sand Painting", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2020/049700a253/1vg8dCS9bhu", "parentPublication": { "id": "proceedings/icvrv/2020/0497/0", "title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1tnWwqMuCzu", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "acronym": "vrw", "groupId": "1836626", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1tnXsQx2NOw", "doi": "10.1109/VRW52623.2021.00128", "title": "Determining the Target Point of the Mid-Air Pinch Gesture", "normalizedTitle": "Determining the Target Point of the Mid-Air Pinch Gesture", "abstract": "Pinching is a common gesture primarily used for zooming on mobile devices, and previous studies considered utilizing it as a mid-air gesture in AR/VR. As opposed to touch screens, there is no physical contact point between the display and the fingers in mid-air pinching, which means the positional relationship between the target point for zooming and the users' finger movement in mid-air pinching could be different from that of touch screens. In this study, we investigated the relationship in mid-air pinching to estimate the target point from the hand posture, and found that the point was significantly off towards the thumb and away from the index finger (approximately 7% offset). This finding contributes to a more accurate mid-air zooming.", "abstracts": [ { "abstractType": "Regular", "content": "Pinching is a common gesture primarily used for zooming on mobile devices, and previous studies considered utilizing it as a mid-air gesture in AR/VR. As opposed to touch screens, there is no physical contact point between the display and the fingers in mid-air pinching, which means the positional relationship between the target point for zooming and the users' finger movement in mid-air pinching could be different from that of touch screens. 
In this study, we investigated the relationship in mid-air pinching to estimate the target point from the hand posture, and found that the point was significantly off towards the thumb and away from the index finger (approximately 7% offset). This finding contributes to a more accurate mid-air zooming.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Pinching is a common gesture primarily used for zooming on mobile devices, and previous studies considered utilizing it as a mid-air gesture in AR/VR. As opposed to touch screens, there is no physical contact point between the display and the fingers in mid-air pinching, which means the positional relationship between the target point for zooming and the users' finger movement in mid-air pinching could be different from that of touch screens. In this study, we investigated the relationship in mid-air pinching to estimate the target point from the hand posture, and found that the point was significantly off towards the thumb and away from the index finger (approximately 7% offset). 
This finding contributes to a more accurate mid-air zooming.", "fno": "405700a492", "keywords": [ "Gesture Recognition", "Human Computer Interaction", "Touch Sensitive Screens", "Target Point", "Mid Air Pinch Gesture", "Touch Screens", "Physical Contact Point", "Mid Air Pinching", "Mid Air Zooming", "Mobile Devices", "AR VR", "Users Finger Movement", "Three Dimensional Displays", "Conferences", "Thumb", "Touch Sensitive Screens", "User Interfaces", "Mobile Handsets", "Mirrors", "Human Centered Computing", "Human Computer Interaction HCI", "Interaction Techniques", "Gestural Input", "Interaction Paradigms", "Mixed Augmented Reality" ], "authors": [ { "affiliation": "The University of Tokyo", "fullName": "Reigo Ban", "givenName": "Reigo", "surname": "Ban", "__typename": "ArticleAuthorType" }, { "affiliation": "The University of Tokyo", "fullName": "Yutaro Hirao", "givenName": "Yutaro", "surname": "Hirao", "__typename": "ArticleAuthorType" }, { "affiliation": "The University of Tokyo", "fullName": "Takuji Narumi", "givenName": "Takuji", "surname": "Narumi", "__typename": "ArticleAuthorType" } ], "idPrefix": "vrw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-03-01T00:00:00", "pubType": "proceedings", "pages": "492-493", "year": "2021", "issn": null, "isbn": "978-1-6654-4057-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "405700a490", "articleId": "1tnXnAd9AK4", "__typename": "AdjacentArticleType" }, "next": { "fno": "405700a494", "articleId": "1tnWQ58g52g", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icrtccm/2017/4799/0/4799a153", "title": "A Secure Gesture Based Authentication Scheme to Unlock the Smartphones", "doi": null, "abstractUrl": "/proceedings-article/icrtccm/2017/4799a153/12OmNvT2p8P", "parentPublication": { "id": 
"proceedings/icrtccm/2017/4799/0", "title": "2017 Second International Conference on Recent Trends and Challenges in Computational Models (ICRTCCM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2016/8942/0/8942a242", "title": "Categorizing Issues in Mid-air InfoVis Interaction", "doi": null, "abstractUrl": "/proceedings-article/iv/2016/8942a242/12OmNyKrH2A", "parentPublication": { "id": "proceedings/iv/2016/8942/0", "title": "2016 20th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2007/2900/0/29000935", "title": "How Feasible Are Star Wars Mid-air Displays", "doi": null, "abstractUrl": "/proceedings-article/iv/2007/29000935/12OmNyQ7FSv", "parentPublication": { "id": "proceedings/iv/2007/2900/0", "title": "2007 11th International Conference Information Visualization (IV '07)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cisis/2016/0987/0/0987a633", "title": "A Desktop 3D Modeling System Controllable by Mid-air Interactions", "doi": null, "abstractUrl": "/proceedings-article/cisis/2016/0987a633/12OmNyyO8Hb", "parentPublication": { "id": "proceedings/cisis/2016/0987/0", "title": "2016 10th International Conference on Complex, Intelligent, and Software Intensive Systems (CISIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/so/1991/02/s2093", "title": "Touch Screens Now Offer Compelling Uses", "doi": null, "abstractUrl": "/magazine/so/1991/02/s2093/13rRUyfbwow", "parentPublication": { "id": "mags/so", "title": "IEEE Software", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/compsac/2018/2666/1/266601a339", "title": "A Japanese Software Keyboard for Tablets that Reduces User Fatigue", "doi": null, "abstractUrl": 
"/proceedings-article/compsac/2018/266601a339/144U9b07hJP", "parentPublication": { "id": "proceedings/compsac/2018/2666/2", "title": "2018 IEEE 42nd Annual Computer Software and Applications Conference (COMPSAC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a722", "title": "AIR-range: Arranging optical systems to present mid-AIR images with continuous luminance on and above a tabletop", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a722/1CJd3cfYsbm", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a702", "title": "Personalization of a Mid-Air Gesture Keyboard using Multi-Objective Bayesian Optimization", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a702/1JrQW09ujvi", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cyberc/2022/3154/0/315400a213", "title": "Machine Learning-based Gesture Recognition Using Wearable Devices", "doi": null, "abstractUrl": "/proceedings-article/cyberc/2022/315400a213/1M66jPqDacE", "parentPublication": { "id": "proceedings/cyberc/2022/3154/0", "title": "2022 International Conference on Cyber-Enabled Distributed Computing and Knowledge Discovery (CyberC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a430", "title": "Visualization and Manipulation of Air Conditioner Flow via Touch Screen", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a430/1tnXoOh5fLW", "parentPublication": { "id": 
"proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyQYteX", "title": "2016 IEEE/ACS 13th International Conference of Computer Systems and Applications (AICCSA)", "acronym": "aiccsa", "groupId": "1000146", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNqGiu9S", "doi": "10.1109/AICCSA.2016.7945751", "title": "Towards the growth of optical security systems for image encryption by polarized light", "normalizedTitle": "Towards the growth of optical security systems for image encryption by polarized light", "abstract": "Motivated by recent interest in polarization encoding, we present an image encryption/decryption scheme based on polarized light which provides additional flexibility in key encryption designs, and double random phase masks. In this scheme, the primary image is encrypted relying on stokes-Mueller formalism by using two spatial light modulators (SLMs). With the proposed method it is possible to control the polarization ellipse parameters and the rotation angles of the SLMs. Numerical simulation is performed for gray-scale images to demonstrate the validity of this new proposed method. The performance measurement parameters mean-square-error and peak-signal-to-noise ratio have been calculated to verify the feasibility of the scheme.", "abstracts": [ { "abstractType": "Regular", "content": "Motivated by recent interest in polarization encoding, we present an image encryption/decryption scheme based on polarized light which provides additional flexibility in key encryption designs, and double random phase masks. In this scheme, the primary image is encrypted relying on stokes-Mueller formalism by using two spatial light modulators (SLMs). With the proposed method it is possible to control the polarization ellipse parameters and the rotation angles of the SLMs. Numerical simulation is performed for gray-scale images to demonstrate the validity of this new proposed method. 
The performance measurement parameters mean-square-error and peak-signal-to-noise ratio have been calculated to verify the feasibility of the scheme.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Motivated by recent interest in polarization encoding, we present an image encryption/decryption scheme based on polarized light which provides additional flexibility in key encryption designs, and double random phase masks. In this scheme, the primary image is encrypted relying on stokes-Mueller formalism by using two spatial light modulators (SLMs). With the proposed method it is possible to control the polarization ellipse parameters and the rotation angles of the SLMs. Numerical simulation is performed for gray-scale images to demonstrate the validity of this new proposed method. The performance measurement parameters mean-square-error and peak-signal-to-noise ratio have been calculated to verify the feasibility of the scheme.", "fno": "07945751", "keywords": [ "Cryptography", "Image Processing", "Mean Square Error Methods", "Polarisation", "Spatial Light Modulators", "Optical Security Systems", "Polarized Light", "Polarization Encoding", "Image Encryption Decryption Scheme", "Spatial Light Modulators", "SLM", "Polarization Ellipse Parameters", "Gray Scale Images", "Performance Measurement Parameters", "Mean Square Error", "Peak Signal To Noise Ratio", "Stokes Mueller Formalism", "Encryption", "Optical Imaging", "Optical Polarization", "Transforms", "Holography", "Optical Interferometry", "Optical Encryption", "Polarization", "Double Random Phase Encoding", "Image Processing" ], "authors": [ { "affiliation": "Lab. STRS, Institut National des Postes et Télécommunications, Rabat, Morocco", "fullName": "Wiam Zamrani", "givenName": "Wiam", "surname": "Zamrani", "__typename": "ArticleAuthorType" }, { "affiliation": "Lab. 
STRS, Institut National des Postes et Télécommunications, Rabat, Morocco", "fullName": "Esmail Ahouzi", "givenName": "Esmail", "surname": "Ahouzi", "__typename": "ArticleAuthorType" }, { "affiliation": "Departamento de Fisica, Universitat Autonoma de Barcelona, Bellaterra, Spain", "fullName": "Angel Lizana", "givenName": "Angel", "surname": "Lizana", "__typename": "ArticleAuthorType" }, { "affiliation": "Departamento de Fisica, Universitat Autonoma de Barcelona, Bellaterra, Spain", "fullName": "Juan Campus", "givenName": "Juan", "surname": "Campus", "__typename": "ArticleAuthorType" }, { "affiliation": "Departamento de Fisica, Universitat Autonoma de Barcelona, Bellaterra, Spain", "fullName": "Maria Josefa Yzuel", "givenName": "Maria Josefa", "surname": "Yzuel", "__typename": "ArticleAuthorType" } ], "idPrefix": "aiccsa", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2016-11-01T00:00:00", "pubType": "proceedings", "pages": "1-6", "year": "2016", "issn": "2161-5330", "isbn": "978-1-5090-4320-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07945750", "articleId": "12OmNxRWIdL", "__typename": "AdjacentArticleType" }, "next": { "fno": "07945752", "articleId": "12OmNAgoV7l", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icdma/2012/4772/0/4772a102", "title": "A New Optical Fiber Birefringence Measurement Method Based on Polarization Detection and Wavelength Scanning", "doi": null, "abstractUrl": "/proceedings-article/icdma/2012/4772a102/12OmNAqCtOe", "parentPublication": { "id": "proceedings/icdma/2012/4772/0", "title": "2012 Third International Conference on Digital Manufacturing & Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2018/2526/0/08368471", "title": "Dynamic heterodyne 
interferometry", "doi": null, "abstractUrl": "/proceedings-article/iccp/2018/08368471/12OmNBhZ4pc", "parentPublication": { "id": "proceedings/iccp/2018/2526/0", "title": "2018 IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isot/2014/6752/0/07119442", "title": "Holographic Femtosecond Laser Processing with Full Control of Phase Distributions and Polarization States of Light", "doi": null, "abstractUrl": "/proceedings-article/isot/2014/07119442/12OmNBkxstG", "parentPublication": { "id": "proceedings/isot/2014/6752/0", "title": "2014 International Symposium on Optomechatronic Technologies (ISOT 2014)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icece/2010/4031/0/4031e552", "title": "Research on Light Polarization FSO-OFDM System", "doi": null, "abstractUrl": "/proceedings-article/icece/2010/4031e552/12OmNvDqsLP", "parentPublication": { "id": "proceedings/icece/2010/4031/0", "title": "Electrical and Control Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/eqec/2005/8973/0/01567440", "title": "Efficient polarization squeezing in optical fibers", "doi": null, "abstractUrl": "/proceedings-article/eqec/2005/01567440/12OmNxVlTJn", "parentPublication": { "id": "proceedings/eqec/2005/8973/0", "title": "2005 European Quantum Electronics Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iqec/2005/9240/0/01560956", "title": "Development of mirror magneto-optical trap using circularly-polarized light-emitting optical fibers", "doi": null, "abstractUrl": "/proceedings-article/iqec/2005/01560956/12OmNxdVgTS", "parentPublication": { "id": "proceedings/iqec/2005/9240/0", "title": "International Quantum Electronics Conference, 2005.", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icece/2010/4031/0/4031e602", "title": "A Dual-Polarized Broad-Band RCS Testing System", "doi": null, "abstractUrl": "/proceedings-article/icece/2010/4031e602/12OmNxyDZdQ", "parentPublication": { "id": "proceedings/icece/2010/4031/0", "title": "Electrical and Control Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/paccs/2009/3614/0/3614a461", "title": "Synthesis of a New Photochromic Diarylethene and its Application in Holographic Optical Storage", "doi": null, "abstractUrl": "/proceedings-article/paccs/2009/3614a461/12OmNzZmZnP", "parentPublication": { "id": "proceedings/paccs/2009/3614/0", "title": "2009 Pacific-Asia Conference on Circuits, Communications and Systems (PACCS 2009)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aicis/2020/9169/0/916900a081", "title": "Optimization of Radio over Fiber Transmission over Polarized Conversion Light Using SOA", "doi": null, "abstractUrl": "/proceedings-article/aicis/2020/916900a081/1t7llWdvbDa", "parentPublication": { "id": "proceedings/aicis/2020/9169/0", "title": "2020 2nd Annual International Conference on Information and Sciences (AiCIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ctisc/2021/1868/0/186800a315", "title": "Underwater polarized image processing based on active illumination and image fusion of circular polarized light", "doi": null, "abstractUrl": "/proceedings-article/ctisc/2021/186800a315/1wG6qkbLgPK", "parentPublication": { "id": "proceedings/ctisc/2021/1868/0", "title": "2021 3rd International Conference on Advances in Computer Technology, Information Science and Communication (CTISC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNvoWV1c", "title": "Proceedings of the Twenty-Second Annual Hawaii International Conference on System Sciences. Volume 1: Architecture Track", "acronym": "hicss", "groupId": "1000730", "volume": "1", "displayVolume": "0", "year": "1989", "__typename": "ProceedingType" }, "article": { "id": "12OmNx8wTlX", "doi": "10.1109/HICSS.1989.47188", "title": "A coherent system for performing an optical transform", "normalizedTitle": "A coherent system for performing an optical transform", "abstract": "A coherent optical system for performing an arbitrary linear transform is described. The system consists of a holographic mask and two Fourier lenses. A set of equations for determining the amplitude-phase distribution of the mask is given, and the mask is generated by combination of a computer-generated hologram and optical holography. As an example, a Walsh-Hadamard transform of order 32 is realized.<>", "abstracts": [ { "abstractType": "Regular", "content": "A coherent optical system for performing an arbitrary linear transform is described. The system consists of a holographic mask and two Fourier lenses. A set of equations for determining the amplitude-phase distribution of the mask is given, and the mask is generated by combination of a computer-generated hologram and optical holography. As an example, a Walsh-Hadamard transform of order 32 is realized.<>", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A coherent optical system for performing an arbitrary linear transform is described. The system consists of a holographic mask and two Fourier lenses. A set of equations for determining the amplitude-phase distribution of the mask is given, and the mask is generated by combination of a computer-generated hologram and optical holography. 
As an example, a Walsh-Hadamard transform of order 32 is realized.", "fno": "00047188", "keywords": [ "Computer Generated Holography", "Fourier Transform Optics", "Fourier Transforms", "Lenses", "Light Coherence", "Optical Information Processing", "Optical Systems", "Optical Transfer Function", "FT Optics", "Optical Transform", "Coherent Optical System", "Arbitrary Linear Transform", "Holographic Mask", "Fourier Lenses", "Equations", "Amplitude Phase Distribution", "Computer Generated Hologram", "Optical Holography", "Walsh Hadamard Transform", "Holography", "Holographic Optical Components", "Sampling Methods", "Equations", "Fourier Transforms", "Lenses", "Optical Propagation", "Apertures", "Physics", "Artificial Intelligence" ], "authors": [ { "affiliation": "Inst. of Phys., Acad. Sinica, Beijing, China", "fullName": "Guo-Zhen Yang", "givenName": null, "surname": "Guo-Zhen Yang", "__typename": "ArticleAuthorType" }, { "affiliation": "Inst. of Phys., Acad. Sinica, Beijing, China", "fullName": "Yan-Song Chen", "givenName": null, "surname": "Yan-Song Chen", "__typename": "ArticleAuthorType" }, { "affiliation": "Inst. of Phys., Acad. Sinica, Beijing, China", "fullName": "Shi-Hai Zhing", "givenName": null, "surname": "Shi-Hai Zhing", "__typename": "ArticleAuthorType" }, { "affiliation": "Inst. of Phys., Acad. Sinica, Beijing, China", "fullName": "Bi-Zhen Dong", "givenName": null, "surname": "Bi-Zhen Dong", "__typename": "ArticleAuthorType" }, { "affiliation": "Inst. of Phys., Acad. 
Sinica, Beijing, China", "fullName": "De-Hua Li", "givenName": null, "surname": "De-Hua Li", "__typename": "ArticleAuthorType" } ], "idPrefix": "hicss", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "1989-01-01T00:00:00", "pubType": "proceedings", "pages": "445,446,447,448,449", "year": "1989", "issn": null, "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "00047187", "articleId": "12OmNyU63s0", "__typename": "AdjacentArticleType" }, "next": { "fno": "00047189", "articleId": "12OmNvjyxwe", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/dcc/1992/2717/0/00227478", "title": "Optical techniques for image compression", "doi": null, "abstractUrl": "/proceedings-article/dcc/1992/00227478/12OmNCxL9Rj", "parentPublication": { "id": "proceedings/dcc/1992/2717/0", "title": "1992 Data Compression Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2010/4077/2/4077c482", "title": "Imaging Research of Fresnel Holography", "doi": null, "abstractUrl": "/proceedings-article/icicta/2010/4077c482/12OmNrJiCXa", "parentPublication": { "id": "proceedings/icicta/2010/4077/2", "title": "Intelligent Computation Technology and Automation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nvmt/1998/4518/0/00723221", "title": "Nonvolatile optical storage in photorefractive crystals", "doi": null, "abstractUrl": "/proceedings-article/nvmt/1998/00723221/12OmNvjQ91k", "parentPublication": { "id": "proceedings/nvmt/1998/4518/0", "title": "Seventh Biennial IEEE International Nonvolatile Memory Technology Conference. 
Proceedings", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2010/7023/0/05585090", "title": "Computational photography and compressive holography", "doi": null, "abstractUrl": "/proceedings-article/iccp/2010/05585090/12OmNvonIOE", "parentPublication": { "id": "proceedings/iccp/2010/7023/0", "title": "2010 IEEE International Conference on Computational Photography (ICCP 2010)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icbeb/2012/4706/0/4706b730", "title": "Study on Method of Measuring Particle Size of Atmosphere Based on Digital Holography", "doi": null, "abstractUrl": "/proceedings-article/icbeb/2012/4706b730/12OmNxWcH9E", "parentPublication": { "id": "proceedings/icbeb/2012/4706/0", "title": "Biomedical Engineering and Biotechnology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fbie/2008/3561/0/3561a056", "title": "Information Processing in Digital Holographic Microscopy", "doi": null, "abstractUrl": "/proceedings-article/fbie/2008/3561a056/12OmNxjjEjY", "parentPublication": { "id": "proceedings/fbie/2008/3561/0", "title": "2008 International Seminar on Future Biomedical Information Engineering (FBIE 2008)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccis/2013/5004/0/5004a302", "title": "An Efficient Image Encryption and Hiding Method Applied by Double Random Phase Encoding", "doi": null, "abstractUrl": "/proceedings-article/iccis/2013/5004a302/12OmNzkMlNH", "parentPublication": { "id": "proceedings/iccis/2013/5004/0", "title": "2013 International Conference on Computational and Information Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/1979/01/01658454", "title": "Coherent Optical Computing", "doi": null, "abstractUrl": 
"/magazine/co/1979/01/01658454/13rRUB7a15P", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tc/1975/04/01672828", "title": "A Real-Time Parallel Optical Processing Technique", "doi": null, "abstractUrl": "/journal/tc/1975/04/01672828/13rRUwI5UeK", "parentPublication": { "id": "trans/tc", "title": "IEEE Transactions on Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ficloudw/2019/4411/0/441100a130", "title": "Statistical Optical Image Analysis for Information System", "doi": null, "abstractUrl": "/proceedings-article/ficloudw/2019/441100a130/1iHT466xlHG", "parentPublication": { "id": "proceedings/ficloudw/2019/4411/0", "title": "2019 7th International Conference on Future Internet of Things and Cloud Workshops (FiCloudW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyGbI4Q", "title": "2010 IEEE 25th International Symposium on Defect and Fault Tolerance in VLSI Systems", "acronym": "dft", "groupId": "1000190", "volume": "0", "displayVolume": "0", "year": "2010", "__typename": "ProceedingType" }, "article": { "id": "12OmNyz5JSM", "doi": "10.1109/DFT.2010.55", "title": "Recovery Method for a Laser Array Failure on Dynamic Optically Reconfigurable Gate Arrays", "normalizedTitle": "Recovery Method for a Laser Array Failure on Dynamic Optically Reconfigurable Gate Arrays", "abstract": "Demand is increasing daily for a large-gate-count robust VLSI chip that can be used in a radiation-rich space environment. Since they exploit the large storage capacity of a holographic memory, optically reconfigurable gate arrays (ORGAs) have been developed to realize a much larger virtual gate count than those of current VLSI chips. The ORGA architecture is extremely robust for many failure modes caused by high-energy charged particles. Among such developments, dynamic optically reconfigurable gate arrays (DORGAs) have been developed to realize a high-gate-density VLSI using a photodiode memory architecture. Unfortunately, the DORGA architecture is more sensitive to the unallowable turn-off failure mode of a laser array. Therefore, this paper presents a recovery method for a turn-off failure mode of a laser array on a DORGA and its demonstration results.", "abstracts": [ { "abstractType": "Regular", "content": "Demand is increasing daily for a large-gate-count robust VLSI chip that can be used in a radiation-rich space environment. Since they exploit the large storage capacity of a holographic memory, optically reconfigurable gate arrays (ORGAs) have been developed to realize a much larger virtual gate count than those of current VLSI chips. The ORGA architecture is extremely robust for many failure modes caused by high-energy charged particles. 
Among such developments, dynamic optically reconfigurable gate arrays (DORGAs) have been developed to realize a high-gate-density VLSI using a photodiode memory architecture. Unfortunately, the DORGA architecture is more sensitive to the unallowable turn-off failure mode of a laser array. Therefore, this paper presents a recovery method for a turn-off failure mode of a laser array on a DORGA and its demonstration results.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Demand is increasing daily for a large-gate-count robust VLSI chip that can be used in a radiation-rich space environment. Since they exploit the large storage capacity of a holographic memory, optically reconfigurable gate arrays (ORGAs) have been developed to realize a much larger virtual gate count than those of current VLSI chips. The ORGA architecture is extremely robust for many failure modes caused by high-energy charged particles. Among such developments, dynamic optically reconfigurable gate arrays (DORGAs) have been developed to realize a high-gate-density VLSI using a photodiode memory architecture. Unfortunately, the DORGA architecture is more sensitive to the unallowable turn-off failure mode of a laser array. 
Therefore, this paper presents a recovery method for a turn-off failure mode of a laser array on a DORGA and its demonstration results.", "fno": "05634942", "keywords": [ "Field Programmable Gate Arrays", "Holographic Storage", "Integrated Optoelectronics", "Laser Arrays", "Laser Modes", "Optical Logic", "Photodiodes", "Recovery", "Very High Speed Integrated Circuits", "Laser Array Failure", "Dynamic Optically Reconfigurable Gate Arrays", "High Gate Density VLSI Chip", "Photodiode Memory Architecture", "Unallowable Turn Off Failure Mode", "Field Programmable Gate Arrays", "Recovery", "Holographic Memory Storage Capacity", "Context", "Arrays", "Logic Gates", "Laser Modes", "Holographic Optical Components", "Holography", "Field Programmable Gate Arrays", "Optically Reconfigurable Gate Arrays", "Laser Arrays", "Defect Tolerance" ], "authors": [ { "affiliation": null, "fullName": "Daisaku Seto", "givenName": "Daisaku", "surname": "Seto", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Minoru Watanabe", "givenName": "Minoru", "surname": "Watanabe", "__typename": "ArticleAuthorType" } ], "idPrefix": "dft", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2010-10-01T00:00:00", "pubType": "proceedings", "pages": "411-419", "year": "2010", "issn": "1550-5774", "isbn": "978-1-4244-8447-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05634943", "articleId": "12OmNxETa4V", "__typename": "AdjacentArticleType" }, "next": { "fno": "05634947", "articleId": "12OmNy49sIQ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/asap/2009/3732/0/3732a227", "title": "A 16-context Optically Reconfigurable Gate Array", "doi": null, "abstractUrl": "/proceedings-article/asap/2009/3732a227/12OmNBWzHQj", "parentPublication": { "id": "proceedings/asap/2009/3732/0", 
"title": "2009 20th IEEE International Conference on Application-specific Systems, Architectures and Processors", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/asp-dac/2007/0629/0/04196018", "title": "A 0.35um CMOS 1,632-gate-count Zero-Overhead Dynamic Optically Reconfigurable Gate Array VLSI", "doi": null, "abstractUrl": "/proceedings-article/asp-dac/2007/04196018/12OmNqBKU6p", "parentPublication": { "id": "proceedings/asp-dac/2007/0629/0", "title": "2007 Asia and South Pacific Design Automation Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ahs/2009/3714/0/3714a120", "title": "A Sixteen-Context Dynamic Optically Reconfigurable Gate Array", "doi": null, "abstractUrl": "/proceedings-article/ahs/2009/3714a120/12OmNrK9q5u", "parentPublication": { "id": "proceedings/ahs/2009/3714/0", "title": "Adaptive Hardware and Systems, NASA/ESA Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ahs/2010/5889/0/05546242", "title": "Acceleration method of optical reconfigurations using analog configuration contexts", "doi": null, "abstractUrl": "/proceedings-article/ahs/2010/05546242/12OmNs0C9WI", "parentPublication": { "id": "proceedings/ahs/2010/5889/0", "title": "2010 NASA/ESA Conference on Adaptive Hardware and Systems (AHS 2010)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vlsid/2016/8700/0/8700a603", "title": "Reconfiguration Performance Recovery on Optically Reconfigurable Gate Arrays", "doi": null, "abstractUrl": "/proceedings-article/vlsid/2016/8700a603/12OmNvA1h4S", "parentPublication": { "id": "proceedings/vlsid/2016/8700/0", "title": "2016 29th International Conference on VLSI Design and 2016 15th International Conference on Embedded Systems (VLSID)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { 
"id": "proceedings/ipdps/2007/0909/0/04228119", "title": "A multi-context holographic memory recording system for Optically Reconfigurable Gate Arrays", "doi": null, "abstractUrl": "/proceedings-article/ipdps/2007/04228119/12OmNvw2TbN", "parentPublication": { "id": "proceedings/ipdps/2007/0909/0", "title": "2007 IEEE International Parallel and Distributed Processing Symposium", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isvlsid/2012/4767/0/4767a308", "title": "0.18-um CMOS Process Highly Sensitive Differential Optically Reconfigurable Gate Array VLSI", "doi": null, "abstractUrl": "/proceedings-article/isvlsid/2012/4767a308/12OmNwD1q1x", "parentPublication": { "id": "proceedings/isvlsid/2012/4767/0", "title": "2012 IEEE Computer Society Annual Symposium on VLSI", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ahs/2010/5889/0/05546252", "title": "Recovery method for a turn-off failure mode of a laser array on an ORGA", "doi": null, "abstractUrl": "/proceedings-article/ahs/2010/05546252/12OmNy3iFrd", "parentPublication": { "id": "proceedings/ahs/2010/5889/0", "title": "2010 NASA/ESA Conference on Adaptive Hardware and Systems (AHS 2010)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipdps/2006/0054/0/01639478", "title": "An optically differential reconfigurable gate array with a holographic memory", "doi": null, "abstractUrl": "/proceedings-article/ipdps/2006/01639478/12OmNyyO8P7", "parentPublication": { "id": "proceedings/ipdps/2006/0054/0", "title": "Proceedings 20th IEEE International Parallel & Distributed Processing Symposium", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dasc-picom-datacom-cyberscitech/2017/1956/0/08328453", "title": "Holographic Memory Calculation FPGA Accelerator for Optically Reconfigurable Gate Arrays", "doi": null, 
"abstractUrl": "/proceedings-article/dasc-picom-datacom-cyberscitech/2017/08328453/17D45XuDNEp", "parentPublication": { "id": "proceedings/dasc-picom-datacom-cyberscitech/2017/1956/0", "title": "2017 IEEE 15th Intl Conf on Dependable, Autonomic and Secure Computing, 15th Intl Conf on Pervasive Intelligence and Computing, 3rd Intl Conf on Big Data Intelligence and Computing and Cyber Science and Technology Congress(DASC/PiCom/DataCom/CyberSciTech)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1CJbEwHHqEg", "title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1CJczHyWyjK", "doi": "10.1109/VR51125.2022.00075", "title": "Sparse Nanophotonic Phased Arrays for Energy-Efficient Holographic Displays", "normalizedTitle": "Sparse Nanophotonic Phased Arrays for Energy-Efficient Holographic Displays", "abstract": "The Nanophotonic Phased Array (NPA) is an emerging holographic display technology. With chip-scaled sizes, high refresh rates, and integrated light sources, a large-scale NPA can enable high-resolution real-time dynamic holographic displays. However, one of the critical challenges impeding the development of such large-scale NPAs is the high electrical power consumption required to modulate the amplitude and phase of each of the pixel elements. We argue that the modulation of all the elements on the array is, in fact, not necessary to produce a high-quality image. We propose a simple method that outputs the configuration of a sparse NPA, along with the amplitude and the phase required at each active pixel to generate the desired image at the observation plane. We identify the set of active pixels according to their optimized intensities. We observe that the brighter pixels have a greater influence on the target image, and it is these that we must focus on in image formation. Using as few as 10% of the total pixels from a dense 2D array of light-emitting elements, we show that a perceptually acceptable holographic image can be generated. We compare various sparse sampling methods through computational simulations and show that our proposed method gives superior qualitative and quantitative results. 
We believe our study will help advance research on sparse NPAs and facilitate the use of large-scale NPAs to display high-resolution 3D holographic images.", "abstracts": [ { "abstractType": "Regular", "content": "The Nanophotonic Phased Array (NPA) is an emerging holographic display technology. With chip-scaled sizes, high refresh rates, and integrated light sources, a large-scale NPA can enable high-resolution real-time dynamic holographic displays. However, one of the critical challenges impeding the development of such large-scale NPAs is the high electrical power consumption required to modulate the amplitude and phase of each of the pixel elements. We argue that the modulation of all the elements on the array is, in fact, not necessary to produce a high-quality image. We propose a simple method that outputs the configuration of a sparse NPA, along with the amplitude and the phase required at each active pixel to generate the desired image at the observation plane. We identify the set of active pixels according to their optimized intensities. We observe that the brighter pixels have a greater influence on the target image, and it is these that we must focus on in image formation. Using as few as 10% of the total pixels from a dense 2D array of light-emitting elements, we show that a perceptually acceptable holographic image can be generated. We compare various sparse sampling methods through computational simulations and show that our proposed method gives superior qualitative and quantitative results. We believe our study will help advance research on sparse NPAs and facilitate the use of large-scale NPAs to display high-resolution 3D holographic images.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The Nanophotonic Phased Array (NPA) is an emerging holographic display technology. With chip-scaled sizes, high refresh rates, and integrated light sources, a large-scale NPA can enable high-resolution real-time dynamic holographic displays. 
However, one of the critical challenges impeding the development of such large-scale NPAs is the high electrical power consumption required to modulate the amplitude and phase of each of the pixel elements. We argue that the modulation of all the elements on the array is, in fact, not necessary to produce a high-quality image. We propose a simple method that outputs the configuration of a sparse NPA, along with the amplitude and the phase required at each active pixel to generate the desired image at the observation plane. We identify the set of active pixels according to their optimized intensities. We observe that the brighter pixels have a greater influence on the target image, and it is these that we must focus on in image formation. Using as few as 10% of the total pixels from a dense 2D array of light-emitting elements, we show that a perceptually acceptable holographic image can be generated. We compare various sparse sampling methods through computational simulations and show that our proposed method gives superior qualitative and quantitative results. 
We believe our study will help advance research on sparse NPAs and facilitate the use of large-scale NPAs to display high-resolution 3D holographic images.", "fno": "961700a553", "keywords": [ "Holographic Displays", "Integrated Optics", "Nanophotonics", "Optical Arrays", "Integrated Light Sources", "Large Scale NPA", "High Resolution Real Time Dynamic Holographic Displays", "Pixel Elements", "High Quality Image", "Sparse NPA", "Active Pixel", "Observation Plane", "Target Image", "Image Formation", "Total Pixels", "Light Emitting Elements", "Sparse Sampling Methods", "High Resolution 3 D Holographic Images", "Sparse Nanophotonic Phased Arrays", "Phased Arrays", "Solid Modeling", "Three Dimensional Displays", "Power Demand", "Modulation", "Virtual Reality", "User Interfaces", "Computing Methodologies", "Image Processing", "Hardware", "Displays And Imagers", "Mixed Augmented Reality", "Virtual Reality" ], "authors": [ { "affiliation": "University of Maryland,College Park", "fullName": "Susmija Jabbireddy", "givenName": "Susmija", "surname": "Jabbireddy", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Maryland,College Park", "fullName": "Yang Zhang", "givenName": "Yang", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Maryland,College Park", "fullName": "Martin Peckerar", "givenName": "Martin", "surname": "Peckerar", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Maryland,College Park", "fullName": "Mario Dagenais", "givenName": "Mario", "surname": "Dagenais", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Maryland,College Park", "fullName": "Amitabh Varshney", "givenName": "Amitabh", "surname": "Varshney", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-03-01T00:00:00", "pubType": "proceedings", "pages": "553-562", "year": "2022", "issn": null, 
"isbn": "978-1-6654-9617-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "961700a543", "articleId": "1CJbIG4LCP6", "__typename": "AdjacentArticleType" }, "next": { "fno": "961700a563", "articleId": "1CJc4OzR6Q8", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icig/2013/5050/0/5050a761", "title": "Holographic Projection Using Converging Spherical Wave Illumination", "doi": null, "abstractUrl": "/proceedings-article/icig/2013/5050a761/12OmNASraPv", "parentPublication": { "id": "proceedings/icig/2013/5050/0", "title": "2013 Seventh International Conference on Image and Graphics (ICIG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccima/2007/3050/3/30500287", "title": "Robust Reconstruction from Arbitrary Portions of Holographic Representation of Images", "doi": null, "abstractUrl": "/proceedings-article/iccima/2007/30500287/12OmNqBKTMJ", "parentPublication": { "id": "proceedings/iccima/2007/3050/3", "title": "2007 International Conference on Computational Intelligence and Multimedia Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isot/2014/6752/0/07119409", "title": "Multi-cell Analysis Using In-Line Holographic Approach", "doi": null, "abstractUrl": "/proceedings-article/isot/2014/07119409/12OmNzkMlVS", "parentPublication": { "id": "proceedings/isot/2014/6752/0", "title": "2014 International Symposium on Optomechatronic Technologies (ISOT 2014)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2022/9617/0/961700a483", "title": "Interactive Mixed Reality Rendering on Holographic Pyramid", "doi": null, "abstractUrl": "/proceedings-article/vr/2022/961700a483/1CJcsRpGDQI", "parentPublication": { "id": "proceedings/vr/2022/9617/0", "title": 
"2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2022/5851/0/09887757", "title": "Analyzing phase masks for wide &#x00E9;tendue holographic displays", "doi": null, "abstractUrl": "/proceedings-article/iccp/2022/09887757/1GZivOBcOnS", "parentPublication": { "id": "proceedings/iccp/2022/5851/0", "title": "2022 IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a237", "title": "A Compact Photochromic Occlusion Capable See-through Display with Holographic Lenses", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a237/1MNgTZ7ZNLO", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09199563", "title": "Correcting the Proximity Effect in Nanophotonic Phased Arrays", "doi": null, "abstractUrl": "/journal/tg/2020/12/09199563/1ncgvG9aJ6o", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a312", "title": "Towards Eyeglass-style Holographic Near-eye Displays with Statically", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a312/1pysyaCOe76", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a353", "title": "Proximity Effect Correction for Fresnel Holograms on Nanophotonic Phased Arrays", "doi": null, 
"abstractUrl": "/proceedings-article/vr/2021/255600a353/1tuB1K9iOKk", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/11/09523842", "title": "Gaze-Contingent Retinal Speckle Suppression for Perceptually-Matched Foveated Holographic Displays", "doi": null, "abstractUrl": "/journal/tg/2021/11/09523842/1wpqr1B6wA8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1MNgk3BHlS0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2023", "__typename": "ProceedingType" }, "article": { "id": "1MNgFZaCqiI", "doi": "10.1109/VR55154.2023.00057", "title": "Realistic Defocus Blur for Multiplane Computer-Generated Holography", "normalizedTitle": "Realistic Defocus Blur for Multiplane Computer-Generated Holography", "abstract": "This paper introduces a new multiplane CGH computation method to reconstruct artifact-free high-quality holograms with natural-looking defocus blur. Our method introduces a new targeting scheme and a new loss function. While the targeting scheme accounts for defocused parts of the scene at each depth plane, the new loss function analyzes focused and defocused parts separately in reconstructed images. Our method support phase-only CGH calculations using various iterative (e.g., Gerchberg-Saxton, Gradient Descent) and non-iterative (e.g., Double Phase) CGH techniques. We achieve our best image quality using a modified gradient descent-based optimization recipe where we introduce a constraint inspired by the double phase method. We validate our method experimentally using our proof-of-concept holographic display, comparing various algorithms, including multi-depth scenes with sparse and dense contents.", "abstracts": [ { "abstractType": "Regular", "content": "This paper introduces a new multiplane CGH computation method to reconstruct artifact-free high-quality holograms with natural-looking defocus blur. Our method introduces a new targeting scheme and a new loss function. While the targeting scheme accounts for defocused parts of the scene at each depth plane, the new loss function analyzes focused and defocused parts separately in reconstructed images. 
Our method support phase-only CGH calculations using various iterative (e.g., Gerchberg-Saxton, Gradient Descent) and non-iterative (e.g., Double Phase) CGH techniques. We achieve our best image quality using a modified gradient descent-based optimization recipe where we introduce a constraint inspired by the double phase method. We validate our method experimentally using our proof-of-concept holographic display, comparing various algorithms, including multi-depth scenes with sparse and dense contents.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper introduces a new multiplane CGH computation method to reconstruct artifact-free high-quality holograms with natural-looking defocus blur. Our method introduces a new targeting scheme and a new loss function. While the targeting scheme accounts for defocused parts of the scene at each depth plane, the new loss function analyzes focused and defocused parts separately in reconstructed images. Our method support phase-only CGH calculations using various iterative (e.g., Gerchberg-Saxton, Gradient Descent) and non-iterative (e.g., Double Phase) CGH techniques. We achieve our best image quality using a modified gradient descent-based optimization recipe where we introduce a constraint inspired by the double phase method. 
We validate our method experimentally using our proof-of-concept holographic display, comparing various algorithms, including multi-depth scenes with sparse and dense contents.", "fno": "481500a418", "keywords": [ "Technological Innovation", "Three Dimensional Displays", "Virtual Reality", "Holography", "User Interfaces", "Optical Imaging", "Holographic Optical Components", "Hardware X 2014 Emerging Technologies X 2014 Emerging Optical And Photonic Technology", "Hardware X 2014 Communication Hardware Interfaces And Storage X 2014 Display And Imagers" ], "authors": [ { "affiliation": "Koç University, University College London", "fullName": "Koray Kavaklı", "givenName": "Koray", "surname": "Kavaklı", "__typename": "ArticleAuthorType" }, { "affiliation": "The University of Tokyo", "fullName": "Yuta Itoh", "givenName": "Yuta", "surname": "Itoh", "__typename": "ArticleAuthorType" }, { "affiliation": "Koç University", "fullName": "Hakan Urey", "givenName": "Hakan", "surname": "Urey", "__typename": "ArticleAuthorType" }, { "affiliation": "University College London", "fullName": "Kaan Akşit", "givenName": "Kaan", "surname": "Akşit", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2023-03-01T00:00:00", "pubType": "proceedings", "pages": "418-426", "year": "2023", "issn": null, "isbn": "979-8-3503-4815-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1MNgFQqszRu", "name": "pvr202348150-010108460s1-mm_481500a418.zip", "size": "105 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pvr202348150-010108460s1-mm_481500a418.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "481500a409", "articleId": "1MNgA7qw20U", "__typename": "AdjacentArticleType" }, "next": { "fno": "481500a427", "articleId": "1MNgrRl0Z6E", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" 
}, "recommendedArticles": [ { "id": "proceedings/icig/2013/5050/0/5050a761", "title": "Holographic Projection Using Converging Spherical Wave Illumination", "doi": null, "abstractUrl": "/proceedings-article/icig/2013/5050a761/12OmNASraPv", "parentPublication": { "id": "proceedings/icig/2013/5050/0", "title": "2013 Seventh International Conference on Image and Graphics (ICIG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2010/7023/0/05585090", "title": "Computational photography and compressive holography", "doi": null, "abstractUrl": "/proceedings-article/iccp/2010/05585090/12OmNvonIOE", "parentPublication": { "id": "proceedings/iccp/2010/7023/0", "title": "2010 IEEE International Conference on Computational Photography (ICCP 2010)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ssst/1991/2190/0/00138566", "title": "Detection and statistics of amplitude and wave front fluctuations", "doi": null, "abstractUrl": "/proceedings-article/ssst/1991/00138566/12OmNyQph6T", "parentPublication": { "id": "proceedings/ssst/1991/2190/0", "title": "The Twenty-Third Southeastern Symposium on System Theory", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccis/2013/5004/0/5004a302", "title": "An Efficient Image Encryption and Hiding Method Applied by Double Random Phase Encoding", "doi": null, "abstractUrl": "/proceedings-article/iccis/2013/5004a302/12OmNzkMlNH", "parentPublication": { "id": "proceedings/iccis/2013/5004/0", "title": "2013 International Conference on Computational and Information Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ssst/1988/0847/0/00017046", "title": "Determination of optical wavefronts perturbed by random fluctuations of the index refraction", "doi": null, "abstractUrl": "/proceedings-article/ssst/1988/00017046/12OmNzmclw1", 
"parentPublication": { "id": "proceedings/ssst/1988/0847/0", "title": "The Twentieth Southeastern Symposium on System Theory", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/2005/08/r8046", "title": "Computer-Generated Holography as a Generic Display Technology", "doi": null, "abstractUrl": "/magazine/co/2005/08/r8046/13rRUB7a16k", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/04/07829412", "title": "Wide Field Of View Varifocal Near-Eye Display Using See-Through Deformable Membrane Mirrors", "doi": null, "abstractUrl": "/journal/tg/2017/04/07829412/13rRUwcS1D1", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/10025829", "title": "Real-time High-Quality Computer-Generated Hologram Using Complex-Valued Convolutional Neural Network", "doi": null, "abstractUrl": "/journal/tg/5555/01/10025829/1KdUR4fXJXG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09199563", "title": "Correcting the Proximity Effect in Nanophotonic Phased Arrays", "doi": null, "abstractUrl": "/journal/tg/2020/12/09199563/1ncgvG9aJ6o", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a705", "title": "DCGH: Dynamic Computer Generated Holography for Speckle-Free, High Fidelity 3D Displays", "doi": null, "abstractUrl": "/proceedings-article/vr/2021/255600a705/1tuB1hj1VhS", "parentPublication": { "id": "proceedings/vr/2021/1838/0", 
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1tuAeQeDJja", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1tuB1K9iOKk", "doi": "10.1109/VR50410.2021.00058", "title": "Proximity Effect Correction for Fresnel Holograms on Nanophotonic Phased Arrays", "normalizedTitle": "Proximity Effect Correction for Fresnel Holograms on Nanophotonic Phased Arrays", "abstract": "Holographic displays and computer-generated holography offer a unique opportunity in improving optical resolutions and depth characteristics of near-eye displays. The thermally-modulated Nanopho-tonic Phased Array (NPA), a new type of holographic display, affords several advantages, including integrated light source and higher refresh rates, over other holographic display technologies. However, the thermal phase modulation of the NPA makes it susceptible to the thermal proximity effect where heating one pixel affects the temperature of nearby pixels. Proximity effect correction (PEC) methods have been proposed for 2D Fourier holograms in the far field but not for Fresnel holograms at user-specified depths. Here we extend an existing PEC method for the NPA to Fresnel holograms with phase-only hologram optimization and validate it through computational simulations. Our method is not only effective in correcting the proximity effect for the Fresnel holograms of 2D images at desired depths but can also leverage the fast refresh rate of the NPA to display 3D scenes with time-division multiplexing.", "abstracts": [ { "abstractType": "Regular", "content": "Holographic displays and computer-generated holography offer a unique opportunity in improving optical resolutions and depth characteristics of near-eye displays. 
The thermally-modulated Nanopho-tonic Phased Array (NPA), a new type of holographic display, affords several advantages, including integrated light source and higher refresh rates, over other holographic display technologies. However, the thermal phase modulation of the NPA makes it susceptible to the thermal proximity effect where heating one pixel affects the temperature of nearby pixels. Proximity effect correction (PEC) methods have been proposed for 2D Fourier holograms in the far field but not for Fresnel holograms at user-specified depths. Here we extend an existing PEC method for the NPA to Fresnel holograms with phase-only hologram optimization and validate it through computational simulations. Our method is not only effective in correcting the proximity effect for the Fresnel holograms of 2D images at desired depths but can also leverage the fast refresh rate of the NPA to display 3D scenes with time-division multiplexing.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Holographic displays and computer-generated holography offer a unique opportunity in improving optical resolutions and depth characteristics of near-eye displays. The thermally-modulated Nanopho-tonic Phased Array (NPA), a new type of holographic display, affords several advantages, including integrated light source and higher refresh rates, over other holographic display technologies. However, the thermal phase modulation of the NPA makes it susceptible to the thermal proximity effect where heating one pixel affects the temperature of nearby pixels. Proximity effect correction (PEC) methods have been proposed for 2D Fourier holograms in the far field but not for Fresnel holograms at user-specified depths. Here we extend an existing PEC method for the NPA to Fresnel holograms with phase-only hologram optimization and validate it through computational simulations. 
Our method is not only effective in correcting the proximity effect for the Fresnel holograms of 2D images at desired depths but can also leverage the fast refresh rate of the NPA to display 3D scenes with time-division multiplexing.", "fno": "255600a353", "keywords": [ "Computer Generated Holography", "Fourier Transform Optics", "Holographic Displays", "Integrated Optics", "Light Sources", "Nanophotonics", "Optical Arrays", "Proximity Effect Lithography", "Time Division Multiplexing", "Fresnel Holograms", "Phase Only Hologram Optimization", "Computer Generated Holography", "Optical Resolutions", "Depth Characteristics", "Near Eye Displays", "Integrated Light Source", "Holographic Display Technologies", "Thermal Phase Modulation", "Thermal Proximity Effect", "Proximity Effect Correction Methods", "2 D Fourier Holograms", "User Specified Depths", "Thermally Modulated Nanophotonic Phased Array", "Far Field", "Time Division Multiplexing", "3 D Scenes", "Phased Arrays", "Multiplexing", "Solid Modeling", "Three Dimensional Displays", "Phase Modulation", "Proximity Effects", "Holography", "Nanophotonic Phased Array", "Proximity Effect Correction", "Proximal Algorithms", "Phase Only Hologram", "Fresnel Hologram", "Computing Methodologies", "Image Processing", "Computing Methodologies Mixed Augmented Reality Computing Methodologies", "Virtual Reality", "Hardware Displays And Imagers" ], "authors": [ { "affiliation": "University of Maryland", "fullName": "Xuetong Sun", "givenName": "Xuetong", "surname": "Sun", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Maryland", "fullName": "Yang Zhang", "givenName": "Yang", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Maryland", "fullName": "Po-Chun Huang", "givenName": "Po-Chun", "surname": "Huang", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Maryland", "fullName": "Niloy Acharjee", "givenName": "Niloy", "surname": "Acharjee", "__typename": 
"ArticleAuthorType" }, { "affiliation": "University of Maryland", "fullName": "Mario Dagenais", "givenName": "Mario", "surname": "Dagenais", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Maryland", "fullName": "Martin Peckerar", "givenName": "Martin", "surname": "Peckerar", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Maryland", "fullName": "Amitabh Varshney", "givenName": "Amitabh", "surname": "Varshney", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-03-01T00:00:00", "pubType": "proceedings", "pages": "353-362", "year": "2021", "issn": null, "isbn": "978-1-6654-1838-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1tuB1GIHgk0", "name": "pvr202118380-09417643s1-mm_255600a353.zip", "size": "577 kB", "location": "https://www.computer.org/csdl/api/v1/extra/pvr202118380-09417643s1-mm_255600a353.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "255600a345", "articleId": "1tuAuPBgHTi", "__typename": "AdjacentArticleType" }, "next": { "fno": "255600a363", "articleId": "1tuBqndPqhy", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2022/9617/0/961700a746", "title": "Metameric Varifocal Holograms", "doi": null, "abstractUrl": "/proceedings-article/vr/2022/961700a746/1CJcc750PQI", "parentPublication": { "id": "proceedings/vr/2022/9617/0", "title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2022/9617/0/961700a553", "title": "Sparse Nanophotonic Phased Arrays for Energy-Efficient Holographic Displays", "doi": null, "abstractUrl": "/proceedings-article/vr/2022/961700a553/1CJczHyWyjK", "parentPublication": { "id": 
"proceedings/vr/2022/9617/0", "title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09199563", "title": "Correcting the Proximity Effect in Nanophotonic Phased Arrays", "doi": null, "abstractUrl": "/journal/tg/2020/12/09199563/1ncgvG9aJ6o", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyjLoRw", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNAQrYBV", "doi": "10.1109/ISMAR.2014.6948463", "title": "[Poster] Smartwatch-aided handheld augmented reality", "normalizedTitle": "[Poster] Smartwatch-aided handheld augmented reality", "abstract": "We propose a novel method for interaction of humans with real objects in their surrounding combining Visual Search and Augmented Reality (AR). This method is based on utilizing a smartwatch tethered to a smartphone, and it is designed to provide a more user-friendly experience compared to approaches based only on a handheld device, such as a smartphone or a tablet computer. The smart-watch has a built-in camera, which enables scanning objects without the need to take the smartphone out of the pocket. An image captured by the watch is sent wirelessly to the phone that performs Visual Search and subsequently informs the smartwatch whether digital information related to the object is available or not. We thereby distinguish between three cases. If no information is available or the object recognition failed, the user is notified accordingly. If there is digital information available that can be presented using the smartwatch display and/or audio output, it is presented there. The third case is that the recognized object has digital information related to it, which would be beneficial to see in an Augmented Reality view spatially registered with the object in realtime. Then the smartwatch informs the user that this option exists and encourages using the smartphone to experience the Augmented Reality view. 
Thereby, the user only needs to take the phone out of the pocket in case Augmented Reality content is available, and when the content is of interest for the user.", "abstracts": [ { "abstractType": "Regular", "content": "We propose a novel method for interaction of humans with real objects in their surrounding combining Visual Search and Augmented Reality (AR). This method is based on utilizing a smartwatch tethered to a smartphone, and it is designed to provide a more user-friendly experience compared to approaches based only on a handheld device, such as a smartphone or a tablet computer. The smart-watch has a built-in camera, which enables scanning objects without the need to take the smartphone out of the pocket. An image captured by the watch is sent wirelessly to the phone that performs Visual Search and subsequently informs the smartwatch whether digital information related to the object is available or not. We thereby distinguish between three cases. If no information is available or the object recognition failed, the user is notified accordingly. If there is digital information available that can be presented using the smartwatch display and/or audio output, it is presented there. The third case is that the recognized object has digital information related to it, which would be beneficial to see in an Augmented Reality view spatially registered with the object in realtime. Then the smartwatch informs the user that this option exists and encourages using the smartphone to experience the Augmented Reality view. Thereby, the user only needs to take the phone out of the pocket in case Augmented Reality content is available, and when the content is of interest for the user.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose a novel method for interaction of humans with real objects in their surrounding combining Visual Search and Augmented Reality (AR). 
This method is based on utilizing a smartwatch tethered to a smartphone, and it is designed to provide a more user-friendly experience compared to approaches based only on a handheld device, such as a smartphone or a tablet computer. The smart-watch has a built-in camera, which enables scanning objects without the need to take the smartphone out of the pocket. An image captured by the watch is sent wirelessly to the phone that performs Visual Search and subsequently informs the smartwatch whether digital information related to the object is available or not. We thereby distinguish between three cases. If no information is available or the object recognition failed, the user is notified accordingly. If there is digital information available that can be presented using the smartwatch display and/or audio output, it is presented there. The third case is that the recognized object has digital information related to it, which would be beneficial to see in an Augmented Reality view spatially registered with the object in realtime. Then the smartwatch informs the user that this option exists and encourages using the smartphone to experience the Augmented Reality view. 
Thereby, the user only needs to take the phone out of the pocket in case Augmented Reality content is available, and when the content is of interest for the user.", "fno": "06948463", "keywords": [ "Visualization", "Augmented Reality", "Search Problems", "Cameras", "Three Dimensional Displays", "Watches", "Prototypes" ], "authors": [ { "affiliation": "Metaio GmbH", "fullName": "Darko Stanimirovic", "givenName": "Darko", "surname": "Stanimirovic", "__typename": "ArticleAuthorType" }, { "affiliation": "Metaio GmbH", "fullName": "Daniel Kurz", "givenName": "Daniel", "surname": "Kurz", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-09-01T00:00:00", "pubType": "proceedings", "pages": "307-308", "year": "2014", "issn": null, "isbn": "978-1-4799-6184-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06948462", "articleId": "12OmNC8Mswo", "__typename": "AdjacentArticleType" }, "next": { "fno": "06948464", "articleId": "12OmNzTH0Rn", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iswc/2003/2034/0/20340127", "title": "First Steps Towards Handheld Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/iswc/2003/20340127/12OmNAlvHNr", "parentPublication": { "id": "proceedings/iswc/2003/2034/0", "title": "Seventh IEEE International Symposium on Wearable Computers, 2003. 
Proceedings.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2015/7660/0/7660a196", "title": "[POSTER] Towards Estimating Usability Ratings of Handheld Augmented Reality Using Accelerometer Data", "doi": null, "abstractUrl": "/proceedings-article/ismar/2015/7660a196/12OmNBuL1n1", "parentPublication": { "id": "proceedings/ismar/2015/7660/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2014/6184/0/06948456", "title": "[Poster] QR code alteration for augmented reality interactions", "doi": null, "abstractUrl": "/proceedings-article/ismar/2014/06948456/12OmNCga1QG", "parentPublication": { "id": "proceedings/ismar/2014/6184/0", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2016/3740/0/07836466", "title": "Mobile Augmented Reality Based on Invisible Marker", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836466/12OmNx7G5Tm", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2014/6184/0/06948495", "title": "[Demo] Smartwatch-aided handheld augmented reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2014/06948495/12OmNzahc1P", "parentPublication": { "id": "proceedings/ismar/2014/6184/0", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2016/0842/0/07460065", "title": "Smartwatch-assisted robust 6-DOF hand tracker for object manipulation in 
HMD-based augmented reality", "doi": null, "abstractUrl": "/proceedings-article/3dui/2016/07460065/12OmNzlUKES", "parentPublication": { "id": "proceedings/3dui/2016/0842/0", "title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08448289", "title": "Performance Envelopes of in-Air Direct and Smartwatch Indirect Control for Head-Mounted Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08448289/13bd1fZBGcE", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/2012/07/mco2012070026", "title": "Anywhere Interfaces Using Handheld Augmented Reality", "doi": null, "abstractUrl": "/magazine/co/2012/07/mco2012070026/13rRUxYrbPM", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699326", "title": "Evaluation of Direct Manipulation Methods in Augmented Reality Environments Using Google Glass", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699326/19F1Oa8ukP6", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a008", "title": "Augmented Reality-Based Peephole Interaction using Real Space Information", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a008/1gysiEAcM7K", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality 
Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNrAdsuf", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNCd2rIf", "doi": "10.1109/ISMAR.2015.41", "title": "[POSTER] AR4AR: Using Augmented Reality for guidance in Augmented Reality Systems Setup", "normalizedTitle": "[POSTER] AR4AR: Using Augmented Reality for guidance in Augmented Reality Systems Setup", "abstract": "AR systems have been developed for many years now, ranging from systems consisting of a single sensor and output device to systems with a multitude of sensors and/or output devices. With the increasing complexity of the setup, the complexity of handling the different sensors as well as the necessary calibrations and registrations increases accordingly. A much needed (yet missing) area of augmented reality applications is to support AR system engineers when they set up and maintain an AR system by providing visual guides and giving immediate feedback on the current quality of their calibration measurements. In this poster we present an approach to use Augmented Reality itself to support the user in calibrating an Augmented Reality system.", "abstracts": [ { "abstractType": "Regular", "content": "AR systems have been developed for many years now, ranging from systems consisting of a single sensor and output device to systems with a multitude of sensors and/or output devices. With the increasing complexity of the setup, the complexity of handling the different sensors as well as the necessary calibrations and registrations increases accordingly. A much needed (yet missing) area of augmented reality applications is to support AR system engineers when they set up and maintain an AR system by providing visual guides and giving immediate feedback on the current quality of their calibration measurements. 
In this poster we present an approach to use Augmented Reality itself to support the user in calibrating an Augmented Reality system.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "AR systems have been developed for many years now, ranging from systems consisting of a single sensor and output device to systems with a multitude of sensors and/or output devices. With the increasing complexity of the setup, the complexity of handling the different sensors as well as the necessary calibrations and registrations increases accordingly. A much needed (yet missing) area of augmented reality applications is to support AR system engineers when they set up and maintain an AR system by providing visual guides and giving immediate feedback on the current quality of their calibration measurements. In this poster we present an approach to use Augmented Reality itself to support the user in calibrating an Augmented Reality system.", "fno": "7660a140", "keywords": [ "Cameras", "Calibration", "Target Tracking", "Augmented Reality", "Three Dimensional Displays", "Current Measurement", "Visualization" ], "authors": [ { "affiliation": null, "fullName": "Frieder Pankratz", "givenName": "Frieder", "surname": "Pankratz", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Gudrun Klinker", "givenName": "Gudrun", "surname": "Klinker", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-09-01T00:00:00", "pubType": "proceedings", "pages": "140-143", "year": "2015", "issn": null, "isbn": "978-1-4673-7660-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "7660a136", "articleId": "12OmNzV70vZ", "__typename": "AdjacentArticleType" }, "next": { "fno": "7660a144", "articleId": "12OmNBO3K9k", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" 
}, "recommendedArticles": [ { "id": "proceedings/ismar/2014/6184/0/06948463", "title": "[Poster] Smartwatch-aided handheld augmented reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2014/06948463/12OmNAQrYBV", "parentPublication": { "id": "proceedings/ismar/2014/6184/0", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2015/7660/0/7660a120", "title": "[POSTER] Design Guidelines for Generating Augmented Reality Instructions", "doi": null, "abstractUrl": "/proceedings-article/ismar/2015/7660a120/12OmNAle6zC", "parentPublication": { "id": "proceedings/ismar/2015/7660/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/kam/2009/3888/3/3888c091", "title": "Ubiquitous Augmented Reality System", "doi": null, "abstractUrl": "/proceedings-article/kam/2009/3888c091/12OmNCcKQOw", "parentPublication": { "id": "proceedings/kam/2009/3888/1", "title": "Knowledge Acquisition and Modeling, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2015/7660/0/7660a108", "title": "[POSTER] Transforming Your Website to an Augmented Reality View", "doi": null, "abstractUrl": "/proceedings-article/ismar/2015/7660a108/12OmNrIJqv9", "parentPublication": { "id": "proceedings/ismar/2015/7660/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2015/8471/0/8471a024", "title": "Measuring Perception of Realism in Mixed and Augmented Reality Summary", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2015/8471a024/12OmNwErpst", "parentPublication": { "id": 
"proceedings/ismarw/2015/8471/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality Workshops (ISMARW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2014/6184/0/06948454", "title": "[Poster] Augmented reality binoculars on the move", "doi": null, "abstractUrl": "/proceedings-article/ismar/2014/06948454/12OmNwI8c8P", "parentPublication": { "id": "proceedings/ismar/2014/6184/0", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2015/8471/0/8471a003", "title": "Human Perception and Psychology in Augmented Reality (HPPAR) Summary", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2015/8471a003/12OmNz5s0UN", "parentPublication": { "id": "proceedings/ismarw/2015/8471/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality Workshops (ISMARW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2017/6327/0/6327a039", "title": "[POSTER] Industrial Augmented Reality: Transferring a Numerical Control Connected Augmented Realty System from Marketing to Maintenance", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a039/12OmNzC5Tr7", "parentPublication": { "id": "proceedings/ismar-adjunct/2017/6327/0", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699263", "title": "Design and Calibration of an Augmented Reality Haploscope", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699263/19F1OYkEmWs", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct 
(ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/assic/2022/6109/0/10088308", "title": "Home Automation using Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/assic/2022/10088308/1M4rH4muSoE", "parentPublication": { "id": "proceedings/assic/2022/6109/0", "title": "2022 International Conference on Advancements in Smart, Secure and Intelligent Computing (ASSIC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwl8GHU", "title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)", "acronym": "3dui", "groupId": "1001623", "volume": "0", "displayVolume": "0", "year": "2013", "__typename": "ProceedingType" }, "article": { "id": "12OmNqC2uWf", "doi": "10.1109/3DUI.2013.6550237", "title": "Poster: 3D referencing for remote task assistance in augmented reality", "normalizedTitle": "Poster: 3D referencing for remote task assistance in augmented reality", "abstract": "We present a 3D referencing technique tailored for remote maintenance tasks in augmented reality. The goal is to improve the accuracy and efficiency with which a remote expert can point out a real physical object at a local site to a technician at that site. In a typical referencing task, the remote expert instructs the local technician to navigate to a location from which a target object can be viewed, and then to attend to that object. The expert and technician both wear head-tracked, stereo, see-through, head-worn displays, and the expert's hands are tracked by a set of depth cameras. The remote expert first selects one of a set of prerecorded viewpoints of the local site, and a representation of that viewpoint is presented to the technician to help them navigate to the correct position and orientation. The expert then uses hand gestures to indicate the target.", "abstracts": [ { "abstractType": "Regular", "content": "We present a 3D referencing technique tailored for remote maintenance tasks in augmented reality. The goal is to improve the accuracy and efficiency with which a remote expert can point out a real physical object at a local site to a technician at that site. In a typical referencing task, the remote expert instructs the local technician to navigate to a location from which a target object can be viewed, and then to attend to that object. 
The expert and technician both wear head-tracked, stereo, see-through, head-worn displays, and the expert's hands are tracked by a set of depth cameras. The remote expert first selects one of a set of prerecorded viewpoints of the local site, and a representation of that viewpoint is presented to the technician to help them navigate to the correct position and orientation. The expert then uses hand gestures to indicate the target.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a 3D referencing technique tailored for remote maintenance tasks in augmented reality. The goal is to improve the accuracy and efficiency with which a remote expert can point out a real physical object at a local site to a technician at that site. In a typical referencing task, the remote expert instructs the local technician to navigate to a location from which a target object can be viewed, and then to attend to that object. The expert and technician both wear head-tracked, stereo, see-through, head-worn displays, and the expert's hands are tracked by a set of depth cameras. The remote expert first selects one of a set of prerecorded viewpoints of the local site, and a representation of that viewpoint is presented to the technician to help them navigate to the correct position and orientation. 
The expert then uses hand gestures to indicate the target.", "fno": "06550237", "keywords": [ "Three Dimensional Displays", "Cameras", "Augmented Reality", "Indexes", "Thumb", "Maintenance Engineering", "Maintenance And Repair", "Collaborative Mixed Augmented Reality", "Referencing Technique", "Remote Task Assistance" ], "authors": [ { "affiliation": "Columbia Univ., New York, NY, USA", "fullName": "Ohan Oda", "givenName": "Ohan", "surname": "Oda", "__typename": "ArticleAuthorType" }, { "affiliation": "Columbia Univ., New York, NY, USA", "fullName": "Mengu Sukan", "givenName": "Mengu", "surname": "Sukan", "__typename": "ArticleAuthorType" }, { "affiliation": "Columbia Univ., New York, NY, USA", "fullName": "Steven Feiner", "givenName": "Steven", "surname": "Feiner", "__typename": "ArticleAuthorType" }, { "affiliation": "Columbia Univ., New York, NY, USA", "fullName": "Barbara Tversky", "givenName": "Barbara", "surname": "Tversky", "__typename": "ArticleAuthorType" } ], "idPrefix": "3dui", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2013-03-01T00:00:00", "pubType": "proceedings", "pages": "179-180", "year": "2013", "issn": null, "isbn": "978-1-4673-6097-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06550236", "articleId": "12OmNx8wTmi", "__typename": "AdjacentArticleType" }, "next": { "fno": "06550238", "articleId": "12OmNy49sMA", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2012/4660/0/06402558", "title": "3D referencing techniques for physical objects in shared augmented reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2012/06402558/12OmNxj239f", "parentPublication": { "id": "proceedings/ismar/2012/4660/0", "title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2017/6327/0/6327a218", "title": "[POSTER] CoVAR: Mixed-Platform Remote Collaborative Augmented and Virtual Realities System with Shared Collaboration Cues", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a218/12OmNzV70Kh", "parentPublication": { "id": "proceedings/ismar-adjunct/2017/6327/0", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/pc/2003/01/b1065", "title": "Speech-Enabled Augmented Reality Supporting Mobile Industrial Maintenance", "doi": null, "abstractUrl": "/magazine/pc/2003/01/b1065/13rRUxly8UV", "parentPublication": { "id": "mags/pc", "title": "IEEE Pervasive Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699260", "title": "Comparing Different Augmented Reality Support Applications for Cooperative Repair of an Industrial Robot", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699260/19F1M8A6RHO", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699225", "title": "Augmented Reality Remote Collaboration with Dense Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699225/19F1OvIhORa", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10049704", "title": "A Survey on Remote 
Assistance and Training in Mixed Reality Environments", "doi": null, "abstractUrl": "/journal/tg/2023/05/10049704/1KYowCHxUtO", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090418", "title": "Spatial Referencing for Anywhere, Anytime Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090418/1jIxkjif74A", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2020/7675/0/767500a321", "title": "Remote Assistance System in Augmented Reality for Early School Dropout Prevention", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a321/1pBMil9f2x2", "parentPublication": { "id": "proceedings/ismar-adjunct/2020/7675/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a567", "title": "Remote Asynchronous Collaboration in Maintenance scenarios using Augmented Reality and Annotations", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a567/1tnXia3PBp6", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a653", "title": "Remote Assistance with Mixed Reality for Procedural Tasks", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a653/1tnXsEDjkKQ", "parentPublication": { "id": 
"proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxFJXDf", "title": "2011 XIII Symposium on Virtual Reality", "acronym": "svr", "groupId": "1800426", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNvStcVY", "doi": "10.1109/SVR.2011.29", "title": "Geometric Modifications Applied to Real Elements in Augmented Reality", "normalizedTitle": "Geometric Modifications Applied to Real Elements in Augmented Reality", "abstract": "augmented reality applications overlap virtual objects over a real scene considering the context. Today, more advanced applications also make use of diminished reality, which removes real objects from a scene. This paper describes a novel approach that combines augmented reality and diminished reality techniques to modify real objects in augmented reality applications. The proposed approach removes an object and replaces it with its purposely-modified replica. The solution uses dynamic texture techniques and inpaint to enhance the visual response of the performed modification. The results are promising considering both realism of the modified real object and performance of the application.", "abstracts": [ { "abstractType": "Regular", "content": "augmented reality applications overlap virtual objects over a real scene considering the context. Today, more advanced applications also make use of diminished reality, which removes real objects from a scene. This paper describes a novel approach that combines augmented reality and diminished reality techniques to modify real objects in augmented reality applications. The proposed approach removes an object and replaces it with its purposely-modified replica. The solution uses dynamic texture techniques and inpaint to enhance the visual response of the performed modification. 
The results are promising considering both realism of the modified real object and performance of the application.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "augmented reality applications overlap virtual objects over a real scene considering the context. Today, more advanced applications also make use of diminished reality, which removes real objects from a scene. This paper describes a novel approach that combines augmented reality and diminished reality techniques to modify real objects in augmented reality applications. The proposed approach removes an object and replaces it with its purposely-modified replica. The solution uses dynamic texture techniques and inpaint to enhance the visual response of the performed modification. The results are promising considering both realism of the modified real object and performance of the application.", "fno": "4445a096", "keywords": [ "Augmented Reality", "Mixed Reality", "Physically Based Simulation" ], "authors": [ { "affiliation": null, "fullName": "Crystian Wendel M. Leão", "givenName": "Crystian Wendel M.", "surname": "Leão", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "João Paulo Lima", "givenName": "João Paulo", "surname": "Lima", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Veronica Teichrieb", "givenName": "Veronica", "surname": "Teichrieb", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Judith Kelner", "givenName": "Judith", "surname": "Kelner", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Eduardo S. 
Albuquerque", "givenName": "Eduardo S.", "surname": "Albuquerque", "__typename": "ArticleAuthorType" } ], "idPrefix": "svr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-05-01T00:00:00", "pubType": "proceedings", "pages": "96-101", "year": "2011", "issn": null, "isbn": "978-0-7695-4445-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4445a089", "articleId": "12OmNy2Jtbl", "__typename": "AdjacentArticleType" }, "next": { "fno": "4445a102", "articleId": "12OmNqBbHRF", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cse/2010/4323/0/4323a020", "title": "Augmented Reality System Design and Scenario Study for Location-Based Adaptive Mobile Learning", "doi": null, "abstractUrl": "/proceedings-article/cse/2010/4323a020/12OmNA0dMRS", "parentPublication": { "id": "proceedings/cse/2010/4323/0", "title": "2010 13th IEEE International Conference on Computational Science and Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/svr/2013/5001/0/06655774", "title": "The Ghosting Technique Applied to Augmented Reality Visualization", "doi": null, "abstractUrl": "/proceedings-article/svr/2013/06655774/12OmNC8dgpA", "parentPublication": { "id": "proceedings/svr/2013/5001/0", "title": "2013 XV Symposium on Virtual and Augmented Reality (SVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ngmast/2010/4121/0/4121a001", "title": "Enabling Rapid Creation of Content for Consumption in Mobile Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ngmast/2010/4121a001/12OmNwMXnsM", "parentPublication": { "id": "proceedings/ngmast/2010/4121/0", "title": "Next Generation Mobile Applications, Services and Technologies, International Conference on", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2011/0039/0/05759477", "title": "Altered reality: Augmenting and diminishing reality in real time", "doi": null, "abstractUrl": "/proceedings-article/vr/2011/05759477/12OmNxEBz1F", "parentPublication": { "id": "proceedings/vr/2011/0039/0", "title": "2011 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/svr/2012/4725/0/4725a141", "title": "A Viewpoint about Diminished Reality: Is it Possible Remove Objects in Real Time from Scenes?", "doi": null, "abstractUrl": "/proceedings-article/svr/2012/4725a141/12OmNxEjXZZ", "parentPublication": { "id": "proceedings/svr/2012/4725/0", "title": "2012 14th Symposium on Virtual and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2012/4660/0/06402558", "title": "3D referencing techniques for physical objects in shared augmented reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2012/06402558/12OmNxj239f", "parentPublication": { "id": "proceedings/ismar/2012/4660/0", "title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmcs/1999/0253/1/02539195", "title": "Haptics in Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/icmcs/1999/02539195/12OmNyQ7G3s", "parentPublication": { "id": "proceedings/icmcs/1999/0253/1", "title": "Multimedia Computing and Systems, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2011/0039/0/05759497", "title": "Demo &#x2014; Altered reality: Augmenting and diminishing reality in real time", "doi": null, "abstractUrl": "/proceedings-article/vr/2011/05759497/12OmNzxyiIE", "parentPublication": { "id": 
"proceedings/vr/2011/0039/0", "title": "2011 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a327", "title": "Augmented Virtuality Training for Special Education Teachers", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a327/1J7WbAdfchq", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a870", "title": "DeclutterAR: Mobile Diminished Reality and Augmented Reality to Address Hoarding by Motivating Decluttering and Selling on Online Marketplace", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a870/1J7WqRKPLO0", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNqzcvOE", "title": "2017 International Conference on Machine Vision and Information Technology (CMVIT)", "acronym": "cmvit", "groupId": "1818944", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNyL0Tzc", "doi": "10.1109/CMVIT.2017.9", "title": "Augmented Reality Maintenance Training with Intel Depth Camera", "normalizedTitle": "Augmented Reality Maintenance Training with Intel Depth Camera", "abstract": "Augmented Reality (AR) merges real and virtual objects in order to make a richer environment with additional useful virtual information. Producing AR maintenance training depends on two major points that will be Interactivity and Reusability. Recently Augmented Reality becomes the new technology revolution in software applications such as (entertainment, training, military, E-shops and games), and the new generation of hardware computers, mobiles, and consoles which can support the AR view. Always there is a limitation about user interactivity in using keyboard, mouse, and touch screens as input for virtual reality applications. Hand Gesture Recognition using depth cameras is an innovative solution to easily interact and manipulate virtual scene objects which are needed for computer users widely in entertainment or military applications. This research presents a solution for those areas and helps to design more realistic application with low cost and good efficiency. Using IntelRealSense (RS) Camera, which is a low cost hardware and very helpful as gesture and depth capture camera. Maintenance training was chosen as the case study for a reusable AR framework. This paper present a solution to two major problems: First, the calculation of occlusion between the real and the virtual objects including human hand. Second, problem will be to discuss object collision detection, to add more realism between the gesture recognition and the dynamic virtual scene. 
The proposed framework is designed to overcome or enhance the problems of developing augmented reality scenes that can be used to integrate between natural human activities and virtual world or real time computer graphics. So, maintenance training framework could make it possible for users to be trained, and helped with real-time instructions which can be followed to achieve the task goal without need to reference guide or separate catalog. The proposed framework succeeded in implementing maintenance training with gesture recognition stability approximately 90 percent with continues picking selection. Each part movements and speed of gesture action transition took less than a second with medium hand speed with approximately 1 second hand pose for gesture recognition.", "abstracts": [ { "abstractType": "Regular", "content": "Augmented Reality (AR) merges real and virtual objects in order to make a richer environment with additional useful virtual information. Producing AR maintenance training depends on two major points that will be Interactivity and Reusability. Recently Augmented Reality becomes the new technology revolution in software applications such as (entertainment, training, military, E-shops and games), and the new generation of hardware computers, mobiles, and consoles which can support the AR view. Always there is a limitation about user interactivity in using keyboard, mouse, and touch screens as input for virtual reality applications. Hand Gesture Recognition using depth cameras is an innovative solution to easily interact and manipulate virtual scene objects which are needed for computer users widely in entertainment or military applications. This research presents a solution for those areas and helps to design more realistic application with low cost and good efficiency. Using IntelRealSense (RS) Camera, which is a low cost hardware and very helpful as gesture and depth capture camera. 
Maintenance training was chosen as the case study for a reusable AR framework. This paper present a solution to two major problems: First, the calculation of occlusion between the real and the virtual objects including human hand. Second, problem will be to discuss object collision detection, to add more realism between the gesture recognition and the dynamic virtual scene. The proposed framework is designed to overcome or enhance the problems of developing augmented reality scenes that can be used to integrate between natural human activities and virtual world or real time computer graphics. So, maintenance training framework could make it possible for users to be trained, and helped with real-time instructions which can be followed to achieve the task goal without need to reference guide or separate catalog. The proposed framework succeeded in implementing maintenance training with gesture recognition stability approximately 90 percent with continues picking selection. Each part movements and speed of gesture action transition took less than a second with medium hand speed with approximately 1 second hand pose for gesture recognition.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Augmented Reality (AR) merges real and virtual objects in order to make a richer environment with additional useful virtual information. Producing AR maintenance training depends on two major points that will be Interactivity and Reusability. Recently Augmented Reality becomes the new technology revolution in software applications such as (entertainment, training, military, E-shops and games), and the new generation of hardware computers, mobiles, and consoles which can support the AR view. Always there is a limitation about user interactivity in using keyboard, mouse, and touch screens as input for virtual reality applications. 
Hand Gesture Recognition using depth cameras is an innovative solution to easily interact and manipulate virtual scene objects which are needed for computer users widely in entertainment or military applications. This research presents a solution for those areas and helps to design more realistic application with low cost and good efficiency. Using IntelRealSense (RS) Camera, which is a low cost hardware and very helpful as gesture and depth capture camera. Maintenance training was chosen as the case study for a reusable AR framework. This paper present a solution to two major problems: First, the calculation of occlusion between the real and the virtual objects including human hand. Second, problem will be to discuss object collision detection, to add more realism between the gesture recognition and the dynamic virtual scene. The proposed framework is designed to overcome or enhance the problems of developing augmented reality scenes that can be used to integrate between natural human activities and virtual world or real time computer graphics. So, maintenance training framework could make it possible for users to be trained, and helped with real-time instructions which can be followed to achieve the task goal without need to reference guide or separate catalog. The proposed framework succeeded in implementing maintenance training with gesture recognition stability approximately 90 percent with continues picking selection. 
Each part movements and speed of gesture action transition took less than a second with medium hand speed with approximately 1 second hand pose for gesture recognition.", "fno": "07878725", "keywords": [ "Augmented Reality", "Cameras", "Gesture Recognition", "Object Detection", "Augmented Reality Maintenance Training", "Intel Depth Cameras", "Virtual Objects", "AR Maintenance Training", "Reusability", "Software Applications", "Hardware Computers", "AR View", "User Interactivity", "Keyboard", "Mouse", "Touch Screens", "Virtual Reality Applications", "Hand Gesture Recognition", "Innovative Solution", "Virtual Scene Objects", "Computer Users", "Military Applications", "Intel Real Sense", "RS Camera", "Depth Capture Camera", "Reusable AR Framework", "Human Hand", "Object Collision Detection", "Dynamic Virtual Scene", "Augmented Reality Scenes", "Natural Human Activities", "Virtual World", "Real Time Computer Graphics", "Maintenance Training Framework", "Reference Guide", "Gesture Recognition Stability", "Gesture Action Transition", "Cameras", "Maintenance Engineering", "Gesture Recognition", "Three Dimensional Displays", "Training", "Augmented Reality", "Computers", "Augmanted Reality", "Computer Graphics", "Framework", "Intel Real Scense" ], "authors": [ { "affiliation": "Arab Academy for Science and Technology", "fullName": "Mohamed Abdelnaby", "givenName": "Mohamed", "surname": "Abdelnaby", "__typename": "ArticleAuthorType" }, { "affiliation": "Arab Academy for Science and Technology", "fullName": "Mostafa Abd Elazem", "givenName": "Mostafa", "surname": "Abd Elazem", "__typename": "ArticleAuthorType" }, { "affiliation": "Military Technical Collage", "fullName": "Hussain A. 
Aly", "givenName": "Hussain A.", "surname": "Aly", "__typename": "ArticleAuthorType" }, { "affiliation": "ACS Company", "fullName": "Ahmed Kaboudan", "givenName": "Ahmed", "surname": "Kaboudan", "__typename": "ArticleAuthorType" } ], "idPrefix": "cmvit", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-02-01T00:00:00", "pubType": "proceedings", "pages": "116-122", "year": "2017", "issn": null, "isbn": "978-1-5090-4993-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07878724", "articleId": "12OmNroijn0", "__typename": "AdjacentArticleType" }, "next": { "fno": "07878726", "articleId": "12OmNym2bRR", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2013/2869/0/06671841", "title": "Markerless 3D gesture-based interaction for handheld Augmented Reality interfaces", "doi": null, "abstractUrl": "/proceedings-article/ismar/2013/06671841/12OmNAIvcZU", "parentPublication": { "id": "proceedings/ismar/2013/2869/0", "title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/intetain/2015/0061/0/07325483", "title": "A preliminary study of a hybrid user interface for augmented reality applications", "doi": null, "abstractUrl": "/proceedings-article/intetain/2015/07325483/12OmNBtl1pV", "parentPublication": { "id": "proceedings/intetain/2015/0061/0", "title": "2015 7th International Conference on Intelligent Technologies for Interactive Entertainment (INTETAIN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aina/2014/3630/0/3629a382", "title": "Augmented Reality for Improved Service Delivery", "doi": null, "abstractUrl": "/proceedings-article/aina/2014/3629a382/12OmNrJRPmz", 
"parentPublication": { "id": "proceedings/aina/2014/3630/0", "title": "2014 IEEE 28th International Conference on Advanced Information Networking and Applications (AINA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2015/1727/0/07223442", "title": "Augmented reality for maintenance application on a mobile platform", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223442/12OmNxWLTlB", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2015/1727/0/07223429", "title": "Augmented reality maintenance demonstrator and associated modelling", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223429/12OmNylKAXJ", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2017/6327/0/6327a039", "title": "[POSTER] Industrial Augmented Reality: Transferring a Numerical Control Connected Augmented Realty System from Marketing to Maintenance", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a039/12OmNzC5Tr7", "parentPublication": { "id": "proceedings/ismar-adjunct/2017/6327/0", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699183", "title": "Industrial Augmented Reality: Requirements for an Augmented Reality Maintenance Worker Support System", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699183/19F1MWRWSqs", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct 
(ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/compsac/2022/8810/0/881000a545", "title": "A Systematic Literature Review of Virtual and Augmented Reality Applications for Maintenance in Manufacturing", "doi": null, "abstractUrl": "/proceedings-article/compsac/2022/881000a545/1FJ5OxsS4Ba", "parentPublication": { "id": "proceedings/compsac/2022/8810/0", "title": "2022 IEEE 46th Annual Computers, Software, and Applications Conference (COMPSAC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a678", "title": "Industrial Augmented Reality: Lessons learned from a long-term On-site Assessment of Augmented Reality Maintenance Worker Support Systems", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a678/1J7WeihLonS", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2020/7675/0/767500a067", "title": "Industrial Augmented Reality: Concepts and User Interface Designs for Augmented Reality Maintenance Worker Support Systems", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a067/1pBMhXqBhCM", "parentPublication": { "id": "proceedings/ismar-adjunct/2020/7675/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzUPpz2", "title": "2017 International Conference on Smart Grid and Electrical Automation (ICSGEA)", "acronym": "icsgea", "groupId": "1814444", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNroijjJ", "doi": "10.1109/ICSGEA.2017.167", "title": "Experimental Study and Analysis on Heat Transfer Effect of Far Infrared Convection Combined Drying", "normalizedTitle": "Experimental Study and Analysis on Heat Transfer Effect of Far Infrared Convection Combined Drying", "abstract": "Far infrared convection combined heat transfer is one of the best methods for energy saving and drying. In this paper, a comprehensive experimental study and analysis of the far infrared convection combined drying was carried out, to explore the intrinsic link and interaction between parameters in the drying process. Far infrared convection combined heat transfer was discussed in detail. The test was performed on the self-made test bench. The two orthogonal rotation regression design was made to get the regression equation, and by analyzing the corresponding heat transfer response surface, the relationship between the parameters on heat transfer efficiency was obtained.", "abstracts": [ { "abstractType": "Regular", "content": "Far infrared convection combined heat transfer is one of the best methods for energy saving and drying. In this paper, a comprehensive experimental study and analysis of the far infrared convection combined drying was carried out, to explore the intrinsic link and interaction between parameters in the drying process. Far infrared convection combined heat transfer was discussed in detail. The test was performed on the self-made test bench. 
The two orthogonal rotation regression design was made to get the regression equation, and by analyzing the corresponding heat transfer response surface, the relationship between the parameters on heat transfer efficiency was obtained.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Far infrared convection combined heat transfer is one of the best methods for energy saving and drying. In this paper, a comprehensive experimental study and analysis of the far infrared convection combined drying was carried out, to explore the intrinsic link and interaction between parameters in the drying process. Far infrared convection combined heat transfer was discussed in detail. The test was performed on the self-made test bench. The two orthogonal rotation regression design was made to get the regression equation, and by analyzing the corresponding heat transfer response surface, the relationship between the parameters on heat transfer efficiency was obtained.", "fno": "2813a167", "keywords": [ "Convection", "Drying", "Heat Transfer", "Regression Analysis", "Heat Transfer Effect", "Drying Process", "Infrared Convection Combined Heat Transfer", "Heat Transfer Efficiency", "Far Infrared Convection Combined Drying", "Heat Transfer Response Surface", "Regression Equation", "Self Made Test Bench", "Infrared Heating", "Electron Tubes", "Response Surface Methodology", "Temperature", "Convection", "Far Infrared Convection", "Combined Drying", "Paddy", "Heat Transfer Effect" ], "authors": [ { "affiliation": null, "fullName": "Liu Chunshan", "givenName": "Liu", "surname": "Chunshan", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Shang Tao", "givenName": "Shang", "surname": "Tao", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yang Shaoqi", "givenName": "Yang", "surname": "Shaoqi", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Wu Wenfu", "givenName": "Wu", "surname": "Wenfu", "__typename": 
"ArticleAuthorType" }, { "affiliation": null, "fullName": "Chen Siyu", "givenName": "Chen", "surname": "Siyu", "__typename": "ArticleAuthorType" } ], "idPrefix": "icsgea", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-05-01T00:00:00", "pubType": "proceedings", "pages": "167-170", "year": "2017", "issn": null, "isbn": "978-1-5386-2813-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "2813a163", "articleId": "12OmNwIYZyY", "__typename": "AdjacentArticleType" }, "next": { "fno": "2813a171", "articleId": "12OmNyRxFDF", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cdciem/2011/4350/0/4350b112", "title": "Effect of Cavity Wall Temperature and Opening Ratio on the Natural Convection Heat Loss Characteristics of a Solar Cavity Receiver", "doi": null, "abstractUrl": "/proceedings-article/cdciem/2011/4350b112/12OmNARRYl6", "parentPublication": { "id": "proceedings/cdciem/2011/4350/0", "title": "Computer Distributed Control and Intelligent Environmental Monitoring, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icitbs/2016/6061/0/6061a505", "title": "Experimental Study on Heat Transfer Effect of Far Infrared Convection Combined Drying", "doi": null, "abstractUrl": "/proceedings-article/icitbs/2016/6061a505/12OmNAoUTju", "parentPublication": { "id": "proceedings/icitbs/2016/6061/0", "title": "2016 International Conference on Intelligent Transportation, Big Data & Smart City (ICITBS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icsgea/2017/2813/0/2813a089", "title": "Research on the Black Agarics Variable Temperature Infrared-Convection Drying Equipment", "doi": null, "abstractUrl": 
"/proceedings-article/icsgea/2017/2813a089/12OmNApcuql", "parentPublication": { "id": "proceedings/icsgea/2017/2813/0", "title": "2017 International Conference on Smart Grid and Electrical Automation (ICSGEA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icris/2018/6580/0/658001a552", "title": "Experimental Study and Analysis on Heat Transfer Hot Air Temperature of Far Infrared Convection Combined Drying", "doi": null, "abstractUrl": "/proceedings-article/icris/2018/658001a552/12OmNBlFR2c", "parentPublication": { "id": "proceedings/icris/2018/6580/0", "title": "2018 International Conference on Robots & Intelligent System (ICRIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2011/4296/1/4296a482", "title": "Analysis of Solidification Heat Transfer Process on Double-Convection Condition", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2011/4296a482/12OmNCzb9yo", "parentPublication": { "id": "proceedings/icmtma/2011/4296/1", "title": "2011 Third International Conference on Measuring Technology and Mechatronics Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icetet/2008/3267/0/3267b089", "title": "Effect of Turbulence and Fluid Properties on Forced Convection in Tubes", "doi": null, "abstractUrl": "/proceedings-article/icetet/2008/3267b089/12OmNrK9q4K", "parentPublication": { "id": "proceedings/icetet/2008/3267/0", "title": "Emerging Trends in Engineering & Technology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icsgea/2017/2813/0/2813a155", "title": "Analysis on the Influence of the Exchange Area on the Heat Exchange Efficiency during Far-Infrared Convection Combination Grain Drying Process", "doi": null, "abstractUrl": "/proceedings-article/icsgea/2017/2813a155/12OmNyNQSMh", 
"parentPublication": { "id": "proceedings/icsgea/2017/2813/0", "title": "2017 International Conference on Smart Grid and Electrical Automation (ICSGEA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciev/2016/1269/0/07759986", "title": "Thermal analysis of modified pin fin heat sink for natural convection", "doi": null, "abstractUrl": "/proceedings-article/iciev/2016/07759986/12OmNzdoMMM", "parentPublication": { "id": "proceedings/iciev/2016/1269/0", "title": "2016 International Conference on Informatics, Electronics and Vision (ICIEV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2011/289/1/05750649", "title": "Numerical research on heat transfer characteristics of the intensified thermoelectric system for exhaust", "doi": null, "abstractUrl": "/proceedings-article/icicta/2011/05750649/12OmNzvQI3N", "parentPublication": { "id": null, "title": null, "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2019/9226/0/922600a202", "title": "Visual Exploration of Circulation Rolls in Convective Heat Flows", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2019/922600a202/1dlwrj2iD60", "parentPublication": { "id": "proceedings/pacificvis/2019/9226/0", "title": "2019 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzUPpz2", "title": "2017 International Conference on Smart Grid and Electrical Automation (ICSGEA)", "acronym": "icsgea", "groupId": "1814444", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNyNQSMh", "doi": "10.1109/ICSGEA.2017.177", "title": "Analysis on the Influence of the Exchange Area on the Heat Exchange Efficiency during Far-Infrared Convection Combination Grain Drying Process", "normalizedTitle": "Analysis on the Influence of the Exchange Area on the Heat Exchange Efficiency during Far-Infrared Convection Combination Grain Drying Process", "abstract": "Based on energy-saving and quality-keeping drying concept proposed by domestic and overseas scholars in recent years, this paper designs a new experiment scheme.The test was performed on the self-made test bench by means of far infrared convection combined drying method.To explore the intrinsic link and interaction between parameters in the drying process, the inlet air volume was 15.24 m3/h, 18.78 m3/h and 26.23 m3/h. Heat transfer tube spray HS-2-1 low temperature infrared materials and non-sprayed infrared materials. The data of each group were tested and the heat transfer efficiency was calculated. Analysis of sprayed infrared materials and non-sprayed infrared materials under conditions, The heat transfer efficiency to achieve the best state of the parameters of the range.", "abstracts": [ { "abstractType": "Regular", "content": "Based on energy-saving and quality-keeping drying concept proposed by domestic and overseas scholars in recent years, this paper designs a new experiment scheme.The test was performed on the self-made test bench by means of far infrared convection combined drying method.To explore the intrinsic link and interaction between parameters in the drying process, the inlet air volume was 15.24 m3/h, 18.78 m3/h and 26.23 m3/h. 
Heat transfer tube spray HS-2-1 low temperature infrared materials and non-sprayed infrared materials. The data of each group were tested and the heat transfer efficiency was calculated. Analysis of sprayed infrared materials and non-sprayed infrared materials under conditions, The heat transfer efficiency to achieve the best state of the parameters of the range.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Based on energy-saving and quality-keeping drying concept proposed by domestic and overseas scholars in recent years, this paper designs a new experiment scheme.The test was performed on the self-made test bench by means of far infrared convection combined drying method.To explore the intrinsic link and interaction between parameters in the drying process, the inlet air volume was 15.24 m3/h, 18.78 m3/h and 26.23 m3/h. Heat transfer tube spray HS-2-1 low temperature infrared materials and non-sprayed infrared materials. The data of each group were tested and the heat transfer efficiency was calculated. 
Analysis of sprayed infrared materials and non-sprayed infrared materials under conditions, The heat transfer efficiency to achieve the best state of the parameters of the range.", "fno": "2813a155", "keywords": [ "Convection", "Drying", "Heat Exchangers", "Infrared Spectra", "Sprays", "Heat Exchange Efficiency", "Energy Saving", "Quality Keeping Drying Concept", "Intrinsic Link", "Inlet Air Volume", "Transfer Tube Spray", "HS 2 1 Low Temperature Infrared Materials", "Nonsprayed Infrared Materials", "Heat Transfer Efficiency", "Sprayed Infrared Materials", "Exchange Area", "Far Infrared Convection Combination Grain Drying Process", "Heat Transfer", "Infrared Heating", "Electron Tubes", "Temperature Distribution", "Temperature Sensors", "Spraying", "Resistance Heating", "Far Infrared Convection Combined Drying Paddy Exchange Area" ], "authors": [ { "affiliation": null, "fullName": "Liu Chunshan", "givenName": "Liu", "surname": "Chunshan", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Shang Tao", "givenName": "Shang", "surname": "Tao", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yang Shaoqi", "givenName": "Yang", "surname": "Shaoqi", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Wu Wenfu", "givenName": "Wu", "surname": "Wenfu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Chen Siyu", "givenName": "Chen", "surname": "Siyu", "__typename": "ArticleAuthorType" } ], "idPrefix": "icsgea", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-05-01T00:00:00", "pubType": "proceedings", "pages": "155-158", "year": "2017", "issn": null, "isbn": "978-1-5386-2813-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "2813a151", "articleId": "12OmNx7G65p", "__typename": "AdjacentArticleType" }, "next": { "fno": "2813a159", "articleId": 
"12OmNyQ7G8n", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icitbs/2016/6061/0/6061a505", "title": "Experimental Study on Heat Transfer Effect of Far Infrared Convection Combined Drying", "doi": null, "abstractUrl": "/proceedings-article/icitbs/2016/6061a505/12OmNAoUTju", "parentPublication": { "id": "proceedings/icitbs/2016/6061/0", "title": "2016 International Conference on Intelligent Transportation, Big Data & Smart City (ICITBS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icsgea/2017/2813/0/2813a089", "title": "Research on the Black Agarics Variable Temperature Infrared-Convection Drying Equipment", "doi": null, "abstractUrl": "/proceedings-article/icsgea/2017/2813a089/12OmNApcuql", "parentPublication": { "id": "proceedings/icsgea/2017/2813/0", "title": "2017 International Conference on Smart Grid and Electrical Automation (ICSGEA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icris/2018/6580/0/658001a552", "title": "Experimental Study and Analysis on Heat Transfer Hot Air Temperature of Far Infrared Convection Combined Drying", "doi": null, "abstractUrl": "/proceedings-article/icris/2018/658001a552/12OmNBlFR2c", "parentPublication": { "id": "proceedings/icris/2018/6580/0", "title": "2018 International Conference on Robots & Intelligent System (ICRIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cis/2012/4896/0/4896a177", "title": "Design of Computer Measurement and Control System of Test Device for Grain Drying Based on Virtual Instrument", "doi": null, "abstractUrl": "/proceedings-article/cis/2012/4896a177/12OmNqGitS8", "parentPublication": { "id": "proceedings/cis/2012/4896/0", "title": "2012 Eighth International Conference on Computational Intelligence and Security", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/icsgea/2017/2813/0/2813a167", "title": "Experimental Study and Analysis on Heat Transfer Effect of Far Infrared Convection Combined Drying", "doi": null, "abstractUrl": "/proceedings-article/icsgea/2017/2813a167/12OmNroijjJ", "parentPublication": { "id": "proceedings/icsgea/2017/2813/0", "title": "2017 International Conference on Smart Grid and Electrical Automation (ICSGEA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cdciem/2011/4350/0/4350a782", "title": "Experimental Study on Far Infrared and Cross Flow Combination Heating System for Vegetables Drying", "doi": null, "abstractUrl": "/proceedings-article/cdciem/2011/4350a782/12OmNyQpgXW", "parentPublication": { "id": "proceedings/cdciem/2011/4350/0", "title": "Computer Distributed Control and Intelligent Environmental Monitoring, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvris/2018/8031/0/803100a210", "title": "Pilot-Plant Test on the Hot Air Drying inPotato Staple Foods", "doi": null, "abstractUrl": "/proceedings-article/icvris/2018/803100a210/17D45WgziT3", "parentPublication": { "id": "proceedings/icvris/2018/8031/0", "title": "2018 International Conference on Virtual Reality and Intelligent Systems (ICVRIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icedcs/2022/5541/0/554100a294", "title": "Substation Power Equipment Monitoring System Based on Infrared Detection", "doi": null, "abstractUrl": "/proceedings-article/icedcs/2022/554100a294/1JC1vnIcYhO", "parentPublication": { "id": "proceedings/icedcs/2022/5541/0", "title": "2022 International Conference on Electronics and Devices, Computational Science (ICEDCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2021/06/08936479", "title": "Time-Resolved 
Far Infrared Light Transport Decomposition for Thermal Photometric Stereo", "doi": null, "abstractUrl": "/journal/tp/2021/06/08936479/1fRz1xEKLDy", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icitbs/2021/4854/0/485400a304", "title": "Parameters Modeling of Paddy Grain Drying Based on Orthogonal Experiment", "doi": null, "abstractUrl": "/proceedings-article/icitbs/2021/485400a304/1wB6H5v5sYw", "parentPublication": { "id": "proceedings/icitbs/2021/4854/0", "title": "2021 International Conference on Intelligent Transportation, Big Data & Smart City (ICITBS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNvRU0cM", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "1810084", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNz3bdL7", "doi": "10.1109/ISMAR-Adjunct.2017.53", "title": "[POSTER] MR TV Mozaik: A New Mixed Reality Interactive TV Experience", "normalizedTitle": "[POSTER] MR TV Mozaik: A New Mixed Reality Interactive TV Experience", "abstract": "Technicolor has been investigating how Mixed Reality technology could impact the future of home entertainment. We have designed and implemented a system to extend a standard TV experience with AR content, using a consumer tablet or a headset. A virtual TV mosaic is displayed around the TV screen and used as a GUI to control both TV and MR content. Using this interface, the user is able to switch TV content, display meta-data in AR (subtitles, text information or program guide), enhance TV content with interactive 3D objects blended in the environment, or play a game in interaction with the real world. The interactions between the real and the virtual worlds are handled thanks to a scene analysis pre-processing stage, which provides information about both the geometry and the lighting of the real environment. The real-virtual interactions strongly contribute to reinforcement of the immersion feeling. User feedback shows that the concept is very promising.", "abstracts": [ { "abstractType": "Regular", "content": "Technicolor has been investigating how Mixed Reality technology could impact the future of home entertainment. We have designed and implemented a system to extend a standard TV experience with AR content, using a consumer tablet or a headset. A virtual TV mosaic is displayed around the TV screen and used as a GUI to control both TV and MR content. 
Using this interface, the user is able to switch TV content, display meta-data in AR (subtitles, text information or program guide), enhance TV content with interactive 3D objects blended in the environment, or play a game in interaction with the real world. The interactions between the real and the virtual worlds are handled thanks to a scene analysis pre-processing stage, which provides information about both the geometry and the lighting of the real environment. The real-virtual interactions strongly contribute to reinforcement of the immersion feeling. User feedback shows that the concept is very promising.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Technicolor has been investigating how Mixed Reality technology could impact the future of home entertainment. We have designed and implemented a system to extend a standard TV experience with AR content, using a consumer tablet or a headset. A virtual TV mosaic is displayed around the TV screen and used as a GUI to control both TV and MR content. Using this interface, the user is able to switch TV content, display meta-data in AR (subtitles, text information or program guide), enhance TV content with interactive 3D objects blended in the environment, or play a game in interaction with the real world. The interactions between the real and the virtual worlds are handled thanks to a scene analysis pre-processing stage, which provides information about both the geometry and the lighting of the real environment. The real-virtual interactions strongly contribute to reinforcement of the immersion feeling. 
User feedback shows that the concept is very promising.", "fno": "6327a155", "keywords": [ "TV", "Three Dimensional Displays", "Virtual Reality", "Standards", "Servers", "Headphones", "Switches", "Mixed Reality", "Extended TV", "GUI", "Spatial Interactions", "Lighting Estimation", "3 D Modeling" ], "authors": [ { "affiliation": null, "fullName": "Matthieu Fradet", "givenName": "Matthieu", "surname": "Fradet", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Caroline Baillard", "givenName": "Caroline", "surname": "Baillard", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Anthony Laurent", "givenName": "Anthony", "surname": "Laurent", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Tao Luo", "givenName": "Tao", "surname": "Luo", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Philippe Robert", "givenName": "Philippe", "surname": "Robert", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Vincent Alleaume", "givenName": "Vincent", "surname": "Alleaume", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Pierrick Jouet", "givenName": "Pierrick", "surname": "Jouet", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Fabien Servant", "givenName": "Fabien", "surname": "Servant", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-10-01T00:00:00", "pubType": "proceedings", "pages": "155-159", "year": "2017", "issn": null, "isbn": "978-0-7695-6327-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "6327a149", "articleId": "12OmNwB2dUU", "__typename": "AdjacentArticleType" }, "next": { "fno": "6327a160", "articleId": "12OmNx76TWW", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, 
"recommendedArticles": [ { "id": "proceedings/3dui/2013/6097/0/06550223", "title": "Poster: Gesture-based control of avatars for social TV", "doi": null, "abstractUrl": "/proceedings-article/3dui/2013/06550223/12OmNAGepYr", "parentPublication": { "id": "proceedings/3dui/2013/6097/0", "title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/greencom-ithingscpscom/2013/5046/0/06682150", "title": "Social TV EPG Interaction Design for Multi-screen Environment", "doi": null, "abstractUrl": "/proceedings-article/greencom-ithingscpscom/2013/06682150/12OmNANkohQ", "parentPublication": { "id": "proceedings/greencom-ithingscpscom/2013/5046/0", "title": "2013 IEEE International Conference on Green Computing and Communications (GreenCom) and IEEE Internet of Things(iThings) and IEEE Cyber, Physical and Social Computing(CPSCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dmdcm/2011/4413/0/4413a061", "title": "From Virtual Signified to Virtual Reality: Animation in China Children-Oriented Sports TV Program for Beijing Olympic Games", "doi": null, "abstractUrl": "/proceedings-article/dmdcm/2011/4413a061/12OmNCctf8T", "parentPublication": { "id": "proceedings/dmdcm/2011/4413/0", "title": "Digital Media and Digital Content Management, Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2013/0015/0/06607618", "title": "MR images reconstruction based on TV-Group sparse model", "doi": null, "abstractUrl": "/proceedings-article/icme/2013/06607618/12OmNwkR5u5", "parentPublication": { "id": "proceedings/icme/2013/0015/0", "title": "2013 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mines/2011/4559/0/4559a371", "title": "TV Monitor: A P2P-TV Content 
Monitoring Platform", "doi": null, "abstractUrl": "/proceedings-article/mines/2011/4559a371/12OmNx57HJ7", "parentPublication": { "id": "proceedings/mines/2011/4559/0", "title": "Multimedia Information Networking and Security, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892230", "title": "Cinematic virtual reality: Evaluating the effect of display type on the viewing experience for panoramic video", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892230/12OmNx5GTZ2", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2016/3740/0/07836528", "title": "Mixed Reality Extended TV", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836528/12OmNx7ouOs", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/mu/2014/03/mmu2014030010", "title": "Toward Multiscreen Social TV with Geolocation-Aware Social Sense", "doi": null, "abstractUrl": "/magazine/mu/2014/03/mmu2014030010/13rRUIIVlhu", "parentPublication": { "id": "mags/mu", "title": "IEEE MultiMedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a411", "title": "Video Content Representation to Support the Hyper-reality Experience in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2021/255600a411/1tuAwSqh42s", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/ismar/2021/0158/0/015800a265", "title": "The Passenger Experience of Mixed Reality Virtual Display Layouts in Airplane Environments", "doi": null, "abstractUrl": "/proceedings-article/ismar/2021/015800a265/1yeCTWHYvxS", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1CJcAaH6aYg", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "acronym": "vrw", "groupId": "1836626", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1CJemuUb5Be", "doi": "10.1109/VRW55335.2022.00234", "title": "MR-RIEW: An MR Toolkit for Designing Remote Immersive Experiment Workflows", "normalizedTitle": "MR-RIEW: An MR Toolkit for Designing Remote Immersive Experiment Workflows", "abstract": "We present MR-RIEW, a toolkit for virtual and mixed reality that provides researchers with a dynamic way to design an immersive experiment workflow including instructions, environments, sessions, trials and questionnaires. It is implemented in Unity via scriptable objects, allowing simple customisation. The graphic elements, the scenes and the questionnaires can be selected and associated without code. MR-RIEW can save locally into the headset and remotely the questionnaire&#x0027;s answers. MR-RIEW is connected to Google Firebase service for the remote solution requiring a minimal configuration.", "abstracts": [ { "abstractType": "Regular", "content": "We present MR-RIEW, a toolkit for virtual and mixed reality that provides researchers with a dynamic way to design an immersive experiment workflow including instructions, environments, sessions, trials and questionnaires. It is implemented in Unity via scriptable objects, allowing simple customisation. The graphic elements, the scenes and the questionnaires can be selected and associated without code. MR-RIEW can save locally into the headset and remotely the questionnaire&#x0027;s answers. 
MR-RIEW is connected to Google Firebase service for the remote solution requiring a minimal configuration.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present MR-RIEW, a toolkit for virtual and mixed reality that provides researchers with a dynamic way to design an immersive experiment workflow including instructions, environments, sessions, trials and questionnaires. It is implemented in Unity via scriptable objects, allowing simple customisation. The graphic elements, the scenes and the questionnaires can be selected and associated without code. MR-RIEW can save locally into the headset and remotely the questionnaire's answers. MR-RIEW is connected to Google Firebase service for the remote solution requiring a minimal configuration.", "fno": "840200a766", "keywords": [ "Augmented Reality", "Public Domain Software", "MR Toolkit", "MR RIEW", "Virtual Reality", "Mixed Reality", "Scriptable Objects", "Graphic Elements", "Remote Immersive Experiment Workflow Design", "Unity", "Google Firebase Service", "Codes", "Three Dimensional Displays", "Conferences", "Virtual Reality", "Resists", "Writing", "User Interfaces", "Human Centered Computing X 2014 Visualization X 2014 Visualization Techniques X 2014 Treemaps", "Human Centered Computing X 2014 Visualization X 2014 Visualization Design And Evaluation Methods" ], "authors": [ { "affiliation": "imperial College London", "fullName": "Riccardo Bovo", "givenName": "Riccardo", "surname": "Bovo", "__typename": "ArticleAuthorType" }, { "affiliation": "University College London", "fullName": "Daniele Giunchi", "givenName": "Daniele", "surname": "Giunchi", "__typename": "ArticleAuthorType" }, { "affiliation": "University College London", "fullName": "Anthony Steed", "givenName": "Anthony", "surname": "Steed", "__typename": "ArticleAuthorType" }, { "affiliation": "imperial College London", "fullName": "Thomas Heinis", "givenName": "Thomas", "surname": "Heinis", "__typename": "ArticleAuthorType" } ], 
"idPrefix": "vrw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-03-01T00:00:00", "pubType": "proceedings", "pages": "766-767", "year": "2022", "issn": null, "isbn": "978-1-6654-8402-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "840200a764", "articleId": "1CJexMJUGxa", "__typename": "AdjacentArticleType" }, "next": { "fno": "840200a768", "articleId": "1CJe2YDpDIk", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vrais/1993/1363/0/00380743", "title": "The MR Toolkit Peers Package and experiment", "doi": null, "abstractUrl": "/proceedings-article/vrais/1993/00380743/12OmNqJq4F7", "parentPublication": { "id": "proceedings/vrais/1993/1363/0", "title": "Virtual Reality Annual International Symposium", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2016/3740/0/07836524", "title": "Perceptual Issues of a Passive Haptics Feedback Based MR System", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836524/12OmNxecS4t", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2008/1971/0/04480753", "title": "Symmetric Model of Remote Collaborative MR Using Tangible Replicas", "doi": null, "abstractUrl": "/proceedings-article/vr/2008/04480753/12OmNyL0TDr", "parentPublication": { "id": "proceedings/vr/2008/1971/0", "title": "IEEE Virtual Reality 2008", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2018/9269/0/926900a204", "title": "Comparative Reality: Measuring User Experience and Emotion in Immersive Virtual Environments", 
"doi": null, "abstractUrl": "/proceedings-article/aivr/2018/926900a204/17D45Xi9rXe", "parentPublication": { "id": "proceedings/aivr/2018/9269/0", "title": "2018 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a720", "title": "Distinguishing Visual Fatigue, Mental Workload and Acute Stress in Immersive Virtual Reality with Physiological Data: pre-test results", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a720/1CJfmsWj0is", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798207", "title": "[DC] Self-Adaptive Technologies for Immersive Trainings", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798207/1cJ10bYBC2Q", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a353", "title": "Perceptual MR Space: Interactive Toolkit for Efficient Environment Reconstruction in Mobile Mixed Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a353/1gysi7jaaKQ", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090680", "title": "Windtherm Fire: An MR System for Experiencing Breathing Fire of a Dragon", "doi": null, "abstractUrl": 
"/proceedings-article/vrw/2020/09090680/1jIxkaR6olG", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09199566", "title": "Dream-Experiment: A MR User Interface with Natural Multi-channel Interaction for Virtual Experiments", "doi": null, "abstractUrl": "/journal/tg/2020/12/09199566/1ncgz7SgSic", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a759", "title": "Turning a Messy Room into a Fully Immersive VR Playground", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a759/1tnXiK8j7fq", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cI6akLvAuQ", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cJ0MDh0jTO", "doi": "10.1109/VR.2019.8797798", "title": "Revisiting Jean Louis Baudry&#x0027;s Concept of the Ideological Apparatus: Linking Virtual Reality to The Dream Analogy", "normalizedTitle": "Revisiting Jean Louis Baudry's Concept of the Ideological Apparatus: Linking Virtual Reality to The Dream Analogy", "abstract": "This paper aims to link Jean Louis Baudry's theory of cinematographic apparatus, and in particular his &#x201C;dream analogy&#x201D;, to the way we interpret experiential states within a Virtual Reality environment. Analogous elements that constitute and assist in the reading of the &#x201C;dream analogy&#x201D; will be presented as threads for consideration of the relevance and validity of such theory to Virtual Reality. The paper extends research previously undertaken by the author which introduced and linked the theory of the apparatus to electronic media art and in particular to interactive installations and responsive environments.", "abstracts": [ { "abstractType": "Regular", "content": "This paper aims to link Jean Louis Baudry's theory of cinematographic apparatus, and in particular his &#x201C;dream analogy&#x201D;, to the way we interpret experiential states within a Virtual Reality environment. Analogous elements that constitute and assist in the reading of the &#x201C;dream analogy&#x201D; will be presented as threads for consideration of the relevance and validity of such theory to Virtual Reality. 
The paper extends research previously undertaken by the author which introduced and linked the theory of the apparatus to electronic media art and in particular to interactive installations and responsive environments.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper aims to link Jean Louis Baudry's theory of cinematographic apparatus, and in particular his “dream analogy”, to the way we interpret experiential states within a Virtual Reality environment. Analogous elements that constitute and assist in the reading of the “dream analogy” will be presented as threads for consideration of the relevance and validity of such theory to Virtual Reality. The paper extends research previously undertaken by the author which introduced and linked the theory of the apparatus to electronic media art and in particular to interactive installations and responsive environments.", "fno": "08797798", "keywords": [ "Art", "Virtual Reality", "Jean Louis Baudrys Concept", "Ideological Apparatus", "Jean Louis Baudrys Theory", "Cinematographic Apparatus", "Experiential States", "Virtual Reality Environment", "Dream Analogy", "Electronic Media Art", "Interactive Installations", "Virtual Reality", "Robots", "Media", "Art", "Conferences", "Three Dimensional Displays", "User Interfaces", "Film Theory", "Media Art", "Participatory Experience", "Jean Louis Baudry" ], "authors": [ { "affiliation": "Creative Robotics Lab, The University of New South Wales, Sydney", "fullName": "Mari Velonaki", "givenName": "Mari", "surname": "Velonaki", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-03-01T00:00:00", "pubType": "proceedings", "pages": "1675-1676", "year": "2019", "issn": null, "isbn": "978-1-7281-1377-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08798289", "articleId": 
"1cJ0Q1IA4Pm", "__typename": "AdjacentArticleType" }, "next": { "fno": "08797842", "articleId": "1cJ15kwNxnO", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/3dui/2012/1204/0/06184220", "title": "Escape from Meadwyn 4: A cross-platform environment for collaborative navigation tasks", "doi": null, "abstractUrl": "/proceedings-article/3dui/2012/06184220/12OmNy49sNc", "parentPublication": { "id": "proceedings/3dui/2012/1204/0", "title": "2012 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rtcsa/2011/1118/1/06029834", "title": "Augmented Reality Go: Extending Traditional Game Play with Interactive Self-Learning Support", "doi": null, "abstractUrl": "/proceedings-article/rtcsa/2011/06029834/12OmNzb7Zpy", "parentPublication": { "id": "proceedings/rtcsa/2011/1118/1", "title": "2011 IEEE 17th International Conference on Embedded and Real-Time Computing Systems and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icore/2021/0210/0/021000a072", "title": "Ask Billy: An Informative Kiosk in a University", "doi": null, "abstractUrl": "/proceedings-article/icore/2021/021000a072/1AqyrCFgDT2", "parentPublication": { "id": "proceedings/icore/2021/0210/0", "title": "2021 1st International Conference in Information and Computing Research (iCORE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a846", "title": "AmbientTransfer: Presence Enhancement by Converting Video Ambient to Users&#x0027; Somatosensory Feedback", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a846/1CJeeimnzmU", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a319", "title": "Personalization of Intelligent Virtual Agents for Motion Training in Social Settings", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a319/1CJfaVQLXXy", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a742", "title": "Social Presence in VR Empathy Game for Children: Empathic Interaction with the Virtual Characters", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a742/1CJfetqDtnO", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a097", "title": "Application of LargeSpace for Investigating Pedestrians&#x0027; Behaviors when Interacting with Autonomous Vehicles in Shared Spaces", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a097/1CJfrzeh5ny", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797811", "title": "A Virtual-real Occlusion Method Based on GPU Acceleration for MR", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797811/1cJ0INo0Vjy", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/vr/2019/1377/0/08797893", "title": "Augmented Reality Interfaces for Semi-Autonomous Drones", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797893/1cJ0NJAEGQw", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09199566", "title": "Dream-Experiment: A MR User Interface with Natural Multi-channel Interaction for Virtual Experiments", "doi": null, "abstractUrl": "/journal/tg/2020/12/09199566/1ncgz7SgSic", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cdOZ3ClGUM", "title": "2018 XLIV Latin American Computer Conference (CLEI)", "acronym": "clei", "groupId": "1801981", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "1cdOZIqkrfO", "doi": "10.1109/CLEI.2018.00018", "title": "Supporting Interactive System Development Following MR-MPS-SW with HCI Approaches", "normalizedTitle": "Supporting Interactive System Development Following MR-MPS-SW with HCI Approaches", "abstract": "The MPS for Software reference model (MR-MPSSW) is one of the models currently used in Brazilian industry. This software process capability maturity (SPCM) model describes Software Engineering (SE) practices that are applied in the software development by the use of approaches (such as methods, techniques and so on). The Human-Computer Interaction (HCI) community has defined specific approaches (methods, techniques, patterns, and standards) for the conception, design, implementation, and evaluation of interactive systems development. Nevertheless, the literature points out that those approaches are not sufficiently used in industry. Taking advantage of the large use of SPCM models, we have identified HCI approaches that could support SPCM practices related to five processes (Requirements Development, Design and Construction of the Product, Product Integration, Verification and Validation) advocated by the MR-MPS-SW. Analyzing the HCI literature and MR-MPS-SW, we identified a set of HCI approaches for interactive systems development. We interviewed twenty HCI experts to validate and improve this initial set. Thus, we identified 14 HCI categories with examples of methods, techniques, patterns, and standards adequate for performing SE practices of the MR-MPS-SW when developing interactive systems.", "abstracts": [ { "abstractType": "Regular", "content": "The MPS for Software reference model (MR-MPSSW) is one of the models currently used in Brazilian industry. 
This software process capability maturity (SPCM) model describes Software Engineering (SE) practices that are applied in the software development by the use of approaches (such as methods, techniques and so on). The Human-Computer Interaction (HCI) community has defined specific approaches (methods, techniques, patterns, and standards) for the conception, design, implementation, and evaluation of interactive systems development. Nevertheless, the literature points out that those approaches are not sufficiently used in industry. Taking advantage of the large use of SPCM models, we have identified HCI approaches that could support SPCM practices related to five processes (Requirements Development, Design and Construction of the Product, Product Integration, Verification and Validation) advocated by the MR-MPS-SW. Analyzing the HCI literature and MR-MPS-SW, we identified a set of HCI approaches for interactive systems development. We interviewed twenty HCI experts to validate and improve this initial set. Thus, we identified 14 HCI categories with examples of methods, techniques, patterns, and standards adequate for performing SE practices of the MR-MPS-SW when developing interactive systems.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The MPS for Software reference model (MR-MPSSW) is one of the models currently used in Brazilian industry. This software process capability maturity (SPCM) model describes Software Engineering (SE) practices that are applied in the software development by the use of approaches (such as methods, techniques and so on). The Human-Computer Interaction (HCI) community has defined specific approaches (methods, techniques, patterns, and standards) for the conception, design, implementation, and evaluation of interactive systems development. Nevertheless, the literature points out that those approaches are not sufficiently used in industry. 
Taking advantage of the large use of SPCM models, we have identified HCI approaches that could support SPCM practices related to five processes (Requirements Development, Design and Construction of the Product, Product Integration, Verification and Validation) advocated by the MR-MPS-SW. Analyzing the HCI literature and MR-MPS-SW, we identified a set of HCI approaches for interactive systems development. We interviewed twenty HCI experts to validate and improve this initial set. Thus, we identified 14 HCI categories with examples of methods, techniques, patterns, and standards adequate for performing SE practices of the MR-MPS-SW when developing interactive systems.", "fno": "043700a070", "keywords": [ "Capability Maturity Model", "Human Computer Interaction", "Interactive Systems", "Software Development Management", "Software Quality", "MR MPS SW", "Brazilian Industry", "Software Process Capability Maturity Model", "Software Development", "SPCM Models", "HCI Experts", "Interactive System Development", "Software Reference Model", "Software Engineering Practices", "Human Computer Interaction Community", "Human Computer Interaction", "Capability Maturity Model", "Prototypes", "Software", "Standards", "Interactive Systems", "Industries", "Human Computer Interaction Interactive System HCI Approaches Software Process Capability Maturity Model MR MPS SW" ], "authors": [ { "affiliation": "LAMIH CNRS UMR 8201 - Université Polytechnique Hauts-de-France", "fullName": "Taisa Guidini Gonçalves", "givenName": "Taisa", "surname": "Guidini Gonçalves", "__typename": "ArticleAuthorType" }, { "affiliation": "LAMIH CNRS UMR 8201 - Université Polytechnique Hauts-de-France", "fullName": "Káthia Marçal de Oliveira", "givenName": "Káthia", "surname": "Marçal de Oliveira", "__typename": "ArticleAuthorType" }, { "affiliation": "LAMIH CNRS UMR 8201 - Université Polytechnique Hauts-de-France", "fullName": "Christophe Kolski", "givenName": "Christophe", "surname": "Kolski", "__typename": 
"ArticleAuthorType" } ], "idPrefix": "clei", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-10-01T00:00:00", "pubType": "proceedings", "pages": "70-79", "year": "2018", "issn": null, "isbn": "978-1-7281-0437-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "043700a060", "articleId": "1cdP0cwF1u0", "__typename": "AdjacentArticleType" }, "next": { "fno": "043700a100", "articleId": "1cdP0E8Rq5a", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/quatic/2014/6133/0/6133a028", "title": "Results of 10 Years of Software Process Improvement in Brazil Based on the MPS-SW Model", "doi": null, "abstractUrl": "/proceedings-article/quatic/2014/6133a028/12OmNBRsVwV", "parentPublication": { "id": "proceedings/quatic/2014/6133/0", "title": "2014 9th International Conference on the Quality of Information and Communications Technology (QUATIC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/achi/2008/3086/0/3086a265", "title": "Usability Practice: The Appealing Way to HCI", "doi": null, "abstractUrl": "/proceedings-article/achi/2008/3086a265/12OmNBkfRgr", "parentPublication": { "id": "proceedings/achi/2008/3086/0", "title": "International Conference on Advances in Computer-Human Interaction", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/quatic/2012/4777/0/4777a137", "title": "MPS.BR Program and MPS Model: Main Results, Benefits and Beneficiaries of Software Process Improvement in Brazil", "doi": null, "abstractUrl": "/proceedings-article/quatic/2012/4777a137/12OmNBzRNqI", "parentPublication": { "id": "proceedings/quatic/2012/4777/0", "title": "2012 Eighth International Conference on the Quality of Information and Communications Technology", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fie/2004/8552/0/01408515", "title": "Incorporating HCI into the undergraduate curriculum: Bloom's taxonomy meets the CC'01 curricular guidelines", "doi": null, "abstractUrl": "/proceedings-article/fie/2004/01408515/12OmNCm7BCB", "parentPublication": { "id": "proceedings/fie/2004/8552/0", "title": "34th Annual Frontiers in Education, 2004. FIE 2004.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2012/4525/0/4525a503", "title": "Introduction to Human-Computer Interaction (HCI) Minitrack", "doi": null, "abstractUrl": "/proceedings-article/hicss/2012/4525a503/12OmNyuya1J", "parentPublication": { "id": "proceedings/hicss/2012/4525/0", "title": "2012 45th Hawaii International Conference on System Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cisis/2011/4373/0/4373a600", "title": "Novel Human-to-Human Interactions from the Evolution of HCI", "doi": null, "abstractUrl": "/proceedings-article/cisis/2011/4373a600/12OmNyxXluQ", "parentPublication": { "id": "proceedings/cisis/2011/4373/0", "title": "2011 International Conference on Complex, Intelligent, and Software Intensive Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ncm/2008/3322/2/3322b059", "title": "A Holistic Understanding of HCI Perspectives on Smart Home", "doi": null, "abstractUrl": "/proceedings-article/ncm/2008/3322b059/12OmNzICEDc", "parentPublication": { "id": "proceedings/ncm/2008/3322/2", "title": "Networked Computing and Advanced Information Management, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sefm/2005/2435/0/24350342", "title": "A unified description formalism for complex HCI-systems", "doi": null, "abstractUrl": 
"/proceedings-article/sefm/2005/24350342/12OmNzXFoFc", "parentPublication": { "id": "proceedings/sefm/2005/2435/0", "title": "Third IEEE International Conference on Software Engineering and Formal Methods", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/mu/2017/02/mmu2017020009", "title": "Multisensory Experiences in HCI", "doi": null, "abstractUrl": "/magazine/mu/2017/02/mmu2017020009/13rRUxOdD56", "parentPublication": { "id": "mags/mu", "title": "IEEE MultiMedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icict/2020/7283/0/728300a230", "title": "Hacking User in Human-Computer Interaction Design (HCI)", "doi": null, "abstractUrl": "/proceedings-article/icict/2020/728300a230/1jPb75H54Eo", "parentPublication": { "id": "proceedings/icict/2020/7283/0", "title": "2020 3rd International Conference on Information and Computer Technologies (ICICT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1jIxhEnA8IE", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "acronym": "vrw", "groupId": "1836626", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1jIxkaR6olG", "doi": "10.1109/VRW50115.2020.00135", "title": "Windtherm Fire: An MR System for Experiencing Breathing Fire of a Dragon", "normalizedTitle": "Windtherm Fire: An MR System for Experiencing Breathing Fire of a Dragon", "abstract": "We present an interactive MR system in which hot wind is sent to a player&#x2019;s face synchronously to the virtual environment. The system uses a VR accessory called Windtherm, which is attachable to a HMD, heats the air inside, and blows the wind with changeable forces and directions. We improve the device to work interactively to the virtual events. We also develop an MR content in which a player interacts with a small dragon and occasionally receives its breathing fire with simultaneous tactile stimulus of actual wind for enhancing the sense of presence in this fictitious world.", "abstracts": [ { "abstractType": "Regular", "content": "We present an interactive MR system in which hot wind is sent to a player&#x2019;s face synchronously to the virtual environment. The system uses a VR accessory called Windtherm, which is attachable to a HMD, heats the air inside, and blows the wind with changeable forces and directions. We improve the device to work interactively to the virtual events. We also develop an MR content in which a player interacts with a small dragon and occasionally receives its breathing fire with simultaneous tactile stimulus of actual wind for enhancing the sense of presence in this fictitious world.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present an interactive MR system in which hot wind is sent to a player’s face synchronously to the virtual environment. 
The system uses a VR accessory called Windtherm, which is attachable to a HMD, heats the air inside, and blows the wind with changeable forces and directions. We improve the device to work interactively to the virtual events. We also develop an MR content in which a player interacts with a small dragon and occasionally receives its breathing fire with simultaneous tactile stimulus of actual wind for enhancing the sense of presence in this fictitious world.", "fno": "09090680", "keywords": [ "Fires", "Face", "Resists", "Virtual Reality", "Fans", "Temperature Sensors", "Visualization", "Human Centered Computing", "Human Computer Interaction HCI", "Interaction Paradigms", "Mixed Augmented Reality", "Human Centered Computing", "Human Computer Interaction HCI", "Interaction Devices" ], "authors": [ { "affiliation": "Tokyo Denki University", "fullName": "Yuta Ogiwara", "givenName": "Yuta", "surname": "Ogiwara", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Masatoshi Suzuki", "givenName": "Masatoshi", "surname": "Suzuki", "__typename": "ArticleAuthorType" }, { "affiliation": "Tokyo Denki University", "fullName": "Akihiro Matsuura", "givenName": "Akihiro", "surname": "Matsuura", "__typename": "ArticleAuthorType" } ], "idPrefix": "vrw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-03-01T00:00:00", "pubType": "proceedings", "pages": "567-568", "year": "2020", "issn": null, "isbn": "978-1-7281-6532-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09090451", "articleId": "1jIxx6vIU6s", "__typename": "AdjacentArticleType" }, "next": { "fno": "09090600", "articleId": "1jIxwQO6LXa", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vrais/1993/1363/0/00380743", "title": "The MR Toolkit Peers Package and experiment", "doi": null, 
"abstractUrl": "/proceedings-article/vrais/1993/00380743/12OmNqJq4F7", "parentPublication": { "id": "proceedings/vrais/1993/1363/0", "title": "Virtual Reality Annual International Symposium", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2016/3740/0/07836524", "title": "Perceptual Issues of a Passive Haptics Feedback Based MR System", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836524/12OmNxecS4t", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/superc/2001/293/0/01592803", "title": "High Resolution Weather Modeling for Improved Fire Management", "doi": null, "abstractUrl": "/proceedings-article/superc/2001/01592803/12OmNzGlRzr", "parentPublication": { "id": "proceedings/superc/2001/293/0", "title": "ACM/IEEE SC 2001 Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a766", "title": "MR-RIEW: An MR Toolkit for Designing Remote Immersive Experiment Workflows", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a766/1CJemuUb5Be", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sc/2001/1990/0/01592803", "title": "High Resolution Weather Modeling for Improved Fire Management", "doi": null, "abstractUrl": "/proceedings-article/sc/2001/01592803/1MEX5D9OJyM", "parentPublication": { "id": "proceedings/sc/2001/1990/0", "title": "SC Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2020/0497/0/049700a374", "title": 
"A Virtual Reality Cycling System Based On Multi-Sensor Fusion", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2020/049700a374/1vg7TJ5Nwti", "parentPublication": { "id": "proceedings/icvrv/2020/0497/0", "title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1IHotVZum6Q", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "acronym": "icpr", "groupId": "9956007", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1IHq6Mn0tUc", "doi": "10.1109/ICPR56361.2022.9956354", "title": "Channel-Spatial Mutual Attention Network for 360&#x00B0; Salient Object Detection", "normalizedTitle": "Channel-Spatial Mutual Attention Network for 360° Salient Object Detection", "abstract": "In this work, we conduct 360&#x00B0; panoramic salient object detection by taking advantage of both the global and local visual cues of 360&#x00B0; images, with a novel channel-spatial mutual attention network (CSMA-Net). The key component of the CSMA-Net is the proposed CSMA module, which cascades channel-/spatial-weighting-based mutual attentions. The objective of our CSMA module is to refine and fuse the bottleneck features from two separate encoders with different planar representations of 360&#x00B0; panorama as inputs, i.e., equirectangular image and cube map. Our CSMA-Net outperforms 10 state-of-the-art segmentation methods based on the proposed 360&#x00B0; SOD benchmark where multiple fine-tuning and testing strategies are applied to the widely-used 360&#x00B0; datasets. Extensive experimental results illustrate the effectiveness and robustness of the proposed CSMA-Net<sup>1</sup>.", "abstracts": [ { "abstractType": "Regular", "content": "In this work, we conduct 360&#x00B0; panoramic salient object detection by taking advantage of both the global and local visual cues of 360&#x00B0; images, with a novel channel-spatial mutual attention network (CSMA-Net). The key component of the CSMA-Net is the proposed CSMA module, which cascades channel-/spatial-weighting-based mutual attentions. 
The objective of our CSMA module is to refine and fuse the bottleneck features from two separate encoders with different planar representations of 360&#x00B0; panorama as inputs, i.e., equirectangular image and cube map. Our CSMA-Net outperforms 10 state-of-the-art segmentation methods based on the proposed 360&#x00B0; SOD benchmark where multiple fine-tuning and testing strategies are applied to the widely-used 360&#x00B0; datasets. Extensive experimental results illustrate the effectiveness and robustness of the proposed CSMA-Net<sup>1</sup>.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this work, we conduct 360° panoramic salient object detection by taking advantage of both the global and local visual cues of 360° images, with a novel channel-spatial mutual attention network (CSMA-Net). The key component of the CSMA-Net is the proposed CSMA module, which cascades channel-/spatial-weighting-based mutual attentions. The objective of our CSMA module is to refine and fuse the bottleneck features from two separate encoders with different planar representations of 360° panorama as inputs, i.e., equirectangular image and cube map. Our CSMA-Net outperforms 10 state-of-the-art segmentation methods based on the proposed 360° SOD benchmark where multiple fine-tuning and testing strategies are applied to the widely-used 360° datasets. 
Extensive experimental results illustrate the effectiveness and robustness of the proposed CSMA-Net1.", "fno": "09956354", "keywords": [ "Image Segmentation", "Object Detection", "360 X 00 B 0 Panorama Planar Representations", "360 X 00 B 0 SOD Benchmark", "Cascades Channel Spatial Weighting Based Mutual Attentions", "Channel Spatial Mutual Attention Network", "CSMA Module", "CSMA Net", "Cube Map", "Equirectangular Image", "Global Cues", "Local Visual Cues", "Mutual Attentions", "Panoramic Salient Object Detection", "Deep Learning", "Visualization", "Image Segmentation", "Fuses", "Object Detection", "Benchmark Testing", "Robustness" ], "authors": [ { "affiliation": "Univ Rennes, INSA Rennes, CNRS, IETR - UMR 6164,Rennes,France", "fullName": "Yi Zhang", "givenName": "Yi", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "Univ Rennes, INSA Rennes, CNRS, IETR - UMR 6164,Rennes,France", "fullName": "Wassim Hamidouche", "givenName": "Wassim", "surname": "Hamidouche", "__typename": "ArticleAuthorType" }, { "affiliation": "Univ Rennes, INSA Rennes, CNRS, IETR - UMR 6164,Rennes,France", "fullName": "Olivier Deforges", "givenName": "Olivier", "surname": "Deforges", "__typename": "ArticleAuthorType" } ], "idPrefix": "icpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-08-01T00:00:00", "pubType": "proceedings", "pages": "3436-3442", "year": "2022", "issn": null, "isbn": "978-1-6654-9062-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09956480", "articleId": "1IHpEuZm1JS", "__typename": "AdjacentArticleType" }, "next": { "fno": "09956578", "articleId": "1IHqeJNeVvq", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2022/9617/0/961700a001", "title": "Bullet Comments for 360&#x00B0;Video", "doi": null, "abstractUrl": 
"/proceedings-article/vr/2022/961700a001/1CJcgerbwNa", "parentPublication": { "id": "proceedings/vr/2022/9617/0", "title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300k0169", "title": "Viewport Proposal CNN for 360&#x00B0; Video Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300k0169/1gyrgYBrmpy", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093452", "title": "Visual Question Answering on 360&#x00B0; Images", "doi": null, "abstractUrl": "/proceedings-article/wacv/2020/09093452/1jPbCyCHgkw", "parentPublication": { "id": "proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2020/1485/0/09105946", "title": "Interactive 360&#x00B0; Narrative for TV use", "doi": null, "abstractUrl": "/proceedings-article/icmew/2020/09105946/1kwqIbEPjhe", "parentPublication": { "id": "proceedings/icmew/2020/1485/0", "title": "2020 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/04/09212608", "title": "Viewport-Based CNN: A Multi-Task Approach for Assessing 360&#x00B0; Video Quality", "doi": null, "abstractUrl": "/journal/tp/2022/04/09212608/1nG8VYgj7Ik", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wowmom/2020/7374/0/737400a191", 
"title": "A QoE and Visual Attention Evaluation on the Influence of Audio in 360&#x00B0; Videos", "doi": null, "abstractUrl": "/proceedings-article/wowmom/2020/737400a191/1nMQCKTCoeY", "parentPublication": { "id": "proceedings/wowmom/2020/7374/0", "title": "2020 IEEE 21st International Symposium on \"A World of Wireless, Mobile and Multimedia Networks\" (WoWMoM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09199564", "title": "Stage-wise Salient Object Detection in 360&#x00B0; Omnidirectional Image via Object-level Semantical Saliency Ranking", "doi": null, "abstractUrl": "/journal/tg/2020/12/09199564/1ncgt74HUIM", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2021/3864/0/09428439", "title": "OLANET: Self-Supervised 360&#x00B0; Depth Estimation with Effective Distortion-Aware View Synthesis and L1 Smooth Regularization", "doi": null, "abstractUrl": "/proceedings-article/icme/2021/09428439/1uilCx8oUqQ", "parentPublication": { "id": "proceedings/icme/2021/3864/0", "title": "2021 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2021/0191/0/019100d743", "title": "Simple baselines can fool 360&#x00B0; saliency metrics", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2021/019100d743/1yNiDufgtWg", "parentPublication": { "id": "proceedings/iccvw/2021/0191/0", "title": "2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900k0586", "title": "Lighting, Reflectance and Geometry Estimation from 360&#x00B0; Panoramic Stereo", "doi": null, "abstractUrl": 
"/proceedings-article/cvpr/2021/450900k0586/1yeIplXJ9wQ", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1jPbbHBGDHq", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "acronym": "wacv", "groupId": "1000040", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1jPbeCzxg5y", "doi": "10.1109/WACV45572.2020.9093283", "title": "Weakly-Supervised Multi-Person Action Recognition in 360&#x00B0; Videos", "normalizedTitle": "Weakly-Supervised Multi-Person Action Recognition in 360° Videos", "abstract": "The recent development of commodity 360&#x00B0; cameras have enabled a single video to capture an entire scene, which endows promising potentials in surveillance scenarios. However, research in omnidirectional video analysis has lagged behind the hardware advances. In this work, we address the important problem of action recognition in topview 360&#x00B0; videos. Due to the wide filed-of-view, 360&#x00B0; videos usually capture multiple people performing actions at the same time. Furthermore, the appearance of people are deformed. The proposed framework first transforms top-view omnidirectional videos into panoramic videos using a calibrationfree method. Then spatial-temporal features are extracted using region-based 3D CNNs for action recognition. We propose a weakly-supervised method based on multiinstance multi-label learning, which trains the model to recognize and localize multiple actions in a video using only video-level action labels as supervision. We perform experiments to quantitatively validate the efficacy of the proposed method over state-of-the-art baselines and variants of our model, and qualitatively demonstrate action localization results. To enable research in this direction, we introduce the 360Action dataset. It is the first omnidirectional video dataset for multi-person action recognition with a diverse set of scenes, actors and actions. 
The dataset is available at https://github.com/ryukenzen/360action.", "abstracts": [ { "abstractType": "Regular", "content": "The recent development of commodity 360&#x00B0; cameras have enabled a single video to capture an entire scene, which endows promising potentials in surveillance scenarios. However, research in omnidirectional video analysis has lagged behind the hardware advances. In this work, we address the important problem of action recognition in topview 360&#x00B0; videos. Due to the wide filed-of-view, 360&#x00B0; videos usually capture multiple people performing actions at the same time. Furthermore, the appearance of people are deformed. The proposed framework first transforms top-view omnidirectional videos into panoramic videos using a calibrationfree method. Then spatial-temporal features are extracted using region-based 3D CNNs for action recognition. We propose a weakly-supervised method based on multiinstance multi-label learning, which trains the model to recognize and localize multiple actions in a video using only video-level action labels as supervision. We perform experiments to quantitatively validate the efficacy of the proposed method over state-of-the-art baselines and variants of our model, and qualitatively demonstrate action localization results. To enable research in this direction, we introduce the 360Action dataset. It is the first omnidirectional video dataset for multi-person action recognition with a diverse set of scenes, actors and actions. The dataset is available at https://github.com/ryukenzen/360action.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The recent development of commodity 360° cameras have enabled a single video to capture an entire scene, which endows promising potentials in surveillance scenarios. However, research in omnidirectional video analysis has lagged behind the hardware advances. In this work, we address the important problem of action recognition in topview 360° videos. 
Due to the wide filed-of-view, 360° videos usually capture multiple people performing actions at the same time. Furthermore, the appearance of people are deformed. The proposed framework first transforms top-view omnidirectional videos into panoramic videos using a calibrationfree method. Then spatial-temporal features are extracted using region-based 3D CNNs for action recognition. We propose a weakly-supervised method based on multiinstance multi-label learning, which trains the model to recognize and localize multiple actions in a video using only video-level action labels as supervision. We perform experiments to quantitatively validate the efficacy of the proposed method over state-of-the-art baselines and variants of our model, and qualitatively demonstrate action localization results. To enable research in this direction, we introduce the 360Action dataset. It is the first omnidirectional video dataset for multi-person action recognition with a diverse set of scenes, actors and actions. 
The dataset is available at https://github.com/ryukenzen/360action.", "fno": "09093283", "keywords": [ "Cameras", "Convolutional Neural Nets", "Feature Extraction", "Image Motion Analysis", "Image Recognition", "Image Sequences", "Learning Artificial Intelligence", "Video Signal Processing", "Panoramic Videos", "Calibration Free Method", "Spatial Temporal Features Extraction", "Region Based 3 D CN Ns", "Weakly Supervised Method", "Multiinstance Multilabel Learning", "Video Level Action", "Omnidirectional Video Dataset", "360 X 00 B 0 Cameras", "360 X 00 B 0 Videos", "Top View Omnidirectional Videos", "360 Action Dataset", "Multiperson Action Recognition", "Videos", "Cameras", "Feature Extraction", "Three Dimensional Displays", "Radio Frequency", "Transforms", "Training" ], "authors": [ { "affiliation": "National University of Singapore,School of Computing", "fullName": "Junnan Li", "givenName": "Junnan", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": "NEC Corporation", "fullName": "Jianquan Liu", "givenName": "Jianquan", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": "National University of Singapore,School of Computing", "fullName": "Yongkang Wang", "givenName": "Yongkang", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "NEC Corporation", "fullName": "Shoji Nishimura", "givenName": "Shoji", "surname": "Nishimura", "__typename": "ArticleAuthorType" }, { "affiliation": "National University of Singapore,School of Computing", "fullName": "Mohan S. 
Kankanhalli", "givenName": "Mohan S.", "surname": "Kankanhalli", "__typename": "ArticleAuthorType" } ], "idPrefix": "wacv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-03-01T00:00:00", "pubType": "proceedings", "pages": "497-505", "year": "2020", "issn": null, "isbn": "978-1-7281-6553-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09093575", "articleId": "1jPbc5503Ac", "__typename": "AdjacentArticleType" }, "next": { "fno": "09093612", "articleId": "1jPbcxnMhH2", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/wacv/2018/4886/0/488601b405", "title": "Stabilizing First Person 360 Degree Videos", "doi": null, "abstractUrl": "/proceedings-article/wacv/2018/488601b405/12OmNAWpyow", "parentPublication": { "id": "proceedings/wacv/2018/4886/0", "title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797805", "title": "Encouraging Rehabilitation Trials: The Potential of 360&#x00B0; Immersive Instruction Videos", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797805/1cJ13iaKgve", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2019/2838/0/283800a081", "title": "User Experience Study of 360&#x00B0; Music Videos on Computer Monitor and Virtual Reality Goggles", "doi": null, "abstractUrl": "/proceedings-article/iv/2019/283800a081/1cMFaY4kg6I", "parentPublication": { "id": "proceedings/iv/2019/2838/0", "title": "2019 23rd International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/wowmom/2020/7374/0/737400a191", "title": "A QoE and Visual Attention Evaluation on the Influence of Audio in 360&#x00B0; Videos", "doi": null, "abstractUrl": "/proceedings-article/wowmom/2020/737400a191/1nMQCKTCoeY", "parentPublication": { "id": "proceedings/wowmom/2020/7374/0", "title": "2020 IEEE 21st International Symposium on \"A World of Wireless, Mobile and Multimedia Networks\" (WoWMoM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2020/8697/0/869700a065", "title": "Between the Frames - Evaluation of Various Motion Interpolation Algorithms to Improve 360&#x00B0; Video Quality", "doi": null, "abstractUrl": "/proceedings-article/ism/2020/869700a065/1qBbIgvfx6g", "parentPublication": { "id": "proceedings/ism/2020/8697/0", "title": "2020 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a169", "title": "Mitigation of Cybersickness in Immersive 360&#x00B0;Videos", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a169/1tnWBZd1n56", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a510", "title": "The Effect of Camera Height on The User Experience of Mid-air 360&#x00B0; Videos", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a510/1tnXMvwgvmg", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a170", "title": "Bidirectional 
Shadow Rendering for Interactive Mixed 360&#x00B0; Videos", "doi": null, "abstractUrl": "/proceedings-article/vr/2021/255600a170/1tuAEjkRUZy", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2021/04/09487520", "title": "Saliency Computation for Virtual Cinematography in 360&#x00B0; Videos", "doi": null, "abstractUrl": "/magazine/cg/2021/04/09487520/1vg3jOq7WvK", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2020/0497/0/049700a042", "title": "Rating Duration Analysis for Subjective Quality Assessment of 360&#x00B0; Videos", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2020/049700a042/1vg7TpMdSH6", "parentPublication": { "id": "proceedings/icvrv/2020/0497/0", "title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1qyxi3OgORy", "title": "2020 International Conference on 3D Vision (3DV)", "acronym": "3dv", "groupId": "1800494", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1qyxie46ie4", "doi": "10.1109/3DV50981.2020.00062", "title": "Deep Depth Estimation on 360&#x00B0; Images with a Double Quaternion Loss", "normalizedTitle": "Deep Depth Estimation on 360° Images with a Double Quaternion Loss", "abstract": "While 360&#x00B0; images are becoming ubiquitous due to popularity of panoramic content, they cannot directly work with most of the existing depth estimation techniques developed for perspective images. In this paper, we present a deep-learning-based framework of estimating depth from 360&#x00B0; images. We present an adaptive depth refinement procedure that refines depth estimates using normal estimates and pixel-wise uncertainty scores. We introduce double quaternion approximation to combine the loss of the joint estimation of depth and surface normal. Furthermore, we use the double quaternion formulation to also measure stereo consistency between the horizontally displaced depth maps, leading to a new loss function for training a depth estimation CNN. Results show that the new double-quaternion-based loss and the adaptive depth refinement procedure lead to better network performance. Our proposed method can be used with monocular as well as stereo images. When evaluated on several datasets, our method surpasses state-of-the-art methods on most metrics.", "abstracts": [ { "abstractType": "Regular", "content": "While 360&#x00B0; images are becoming ubiquitous due to popularity of panoramic content, they cannot directly work with most of the existing depth estimation techniques developed for perspective images. In this paper, we present a deep-learning-based framework of estimating depth from 360&#x00B0; images. 
We present an adaptive depth refinement procedure that refines depth estimates using normal estimates and pixel-wise uncertainty scores. We introduce double quaternion approximation to combine the loss of the joint estimation of depth and surface normal. Furthermore, we use the double quaternion formulation to also measure stereo consistency between the horizontally displaced depth maps, leading to a new loss function for training a depth estimation CNN. Results show that the new double-quaternion-based loss and the adaptive depth refinement procedure lead to better network performance. Our proposed method can be used with monocular as well as stereo images. When evaluated on several datasets, our method surpasses state-of-the-art methods on most metrics.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "While 360° images are becoming ubiquitous due to popularity of panoramic content, they cannot directly work with most of the existing depth estimation techniques developed for perspective images. In this paper, we present a deep-learning-based framework of estimating depth from 360° images. We present an adaptive depth refinement procedure that refines depth estimates using normal estimates and pixel-wise uncertainty scores. We introduce double quaternion approximation to combine the loss of the joint estimation of depth and surface normal. Furthermore, we use the double quaternion formulation to also measure stereo consistency between the horizontally displaced depth maps, leading to a new loss function for training a depth estimation CNN. Results show that the new double-quaternion-based loss and the adaptive depth refinement procedure lead to better network performance. Our proposed method can be used with monocular as well as stereo images. 
When evaluated on several datasets, our method surpasses state-of-the-art methods on most metrics.", "fno": "812800a524", "keywords": [ "Convolutional Neural Nets", "Deep Learning Artificial Intelligence", "Stereo Image Processing", "Joint Estimation", "Surface Normal Estimation", "Double Quaternion Formulation", "Horizontally Displaced Depth Maps", "Loss Function", "Depth Estimation CNN", "Double Quaternion Based Loss", "Stereo Images", "Deep Depth Estimation", "360 X 00 B 0 Images", "Panoramic Content", "Depth Estimation Techniques", "Perspective Images", "Normal Estimates", "Pixel Wise Uncertainty Scores", "Double Quaternion Approximation", "Monocular Images", "Adaptive Depth Refinement Procedure", "Deep Learning Based Framework", "Stereo Consistency", "Network Performance", "Estimation", "Quaternions", "Three Dimensional Displays", "Training", "Kernel", "Uncertainty", "Distortion" ], "authors": [ { "affiliation": "University of Maryland,College Park", "fullName": "Brandon Yushan Feng", "givenName": "Brandon Yushan", "surname": "Feng", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Maryland,College Park", "fullName": "Wangjue Yao", "givenName": "Wangjue", "surname": "Yao", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Virginia", "fullName": "Zheyuan Liu", "givenName": "Zheyuan", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Maryland,College Park", "fullName": "Amitabh Varshney", "givenName": "Amitabh", "surname": "Varshney", "__typename": "ArticleAuthorType" } ], "idPrefix": "3dv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-11-01T00:00:00", "pubType": "proceedings", "pages": "524-533", "year": "2020", "issn": null, "isbn": "978-1-7281-8128-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "812800a514", "articleId": "1qyxmHey0PS", 
"__typename": "AdjacentArticleType" }, "next": { "fno": "812800a534", "articleId": "1qyxkt31fQA", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icmew/2018/4195/0/08551577", "title": "Viewport-Driven Rate-Distortion Optimized Scalable Live 360&#x00B0; Video Network Multicast", "doi": null, "abstractUrl": "/proceedings-article/icmew/2018/08551577/17D45WZZ7Db", "parentPublication": { "id": "proceedings/icmew/2018/4195/0", "title": "2018 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08661657", "title": "Motion parallax for 360&#x00B0; RGBD video", "doi": null, "abstractUrl": "/journal/tg/2019/05/08661657/18bmQqdj3Nu", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2022/9617/0/961700a001", "title": "Bullet Comments for 360&#x00B0;Video", "doi": null, "abstractUrl": "/proceedings-article/vr/2022/961700a001/1CJcgerbwNa", "parentPublication": { "id": "proceedings/vr/2022/9617/0", "title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859817", "title": "Omni-NeRF: Neural Radiance Field from 360&#x00B0; Image Captures", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859817/1G9DIJAkSzK", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600d752", "title": "360MonoDepth: High-Resolution 360&#x00B0; Monocular Depth Estimation", "doi": null, 
"abstractUrl": "/proceedings-article/cvpr/2022/694600d752/1H1mgCrsMtG", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2019/3131/0/313100a258", "title": "360&#x00B0; Surface Regression with a Hyper-Sphere Loss", "doi": null, "abstractUrl": "/proceedings-article/3dv/2019/313100a258/1ezRDMEgU3C", "parentPublication": { "id": "proceedings/3dv/2019/3131/0", "title": "2019 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093262", "title": "360-Indoor: Towards Learning Real-World Objects in 360&#x00B0; Indoor Equirectangular Images", "doi": null, "abstractUrl": "/proceedings-article/wacv/2020/09093262/1jPbAWPyE8g", "parentPublication": { "id": "proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093452", "title": "Visual Question Answering on 360&#x00B0; Images", "doi": null, "abstractUrl": "/proceedings-article/wacv/2020/09093452/1jPbCyCHgkw", "parentPublication": { "id": "proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ucc/2020/2394/0/239400a414", "title": "Accuracy Analysis on 360&#x00B0; Virtual Reality Video Quality Assessment Methods", "doi": null, "abstractUrl": "/proceedings-article/ucc/2020/239400a414/1pZ0Z6h4ERq", "parentPublication": { "id": "proceedings/ucc/2020/2394/0", "title": "2020 IEEE/ACM 13th International Conference on Utility and Cloud Computing (UCC)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2021/3864/0/09428439", "title": "OLANET: Self-Supervised 360&#x00B0; Depth Estimation with Effective Distortion-Aware View Synthesis and L1 Smooth Regularization", "doi": null, "abstractUrl": "/proceedings-article/icme/2021/09428439/1uilCx8oUqQ", "parentPublication": { "id": "proceedings/icme/2021/3864/0", "title": "2021 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzwpUa0", "title": "Cognitive Technology, International Conference on", "acronym": "ct", "groupId": "1002293", "volume": "0", "displayVolume": "0", "year": "1997", "__typename": "ProceedingType" }, "article": { "id": "12OmNx38vVh", "doi": "10.1109/CT.1997.617676", "title": "The Cyborg's Dilemma: Embodiment in Virtual Environments", "normalizedTitle": "The Cyborg's Dilemma: Embodiment in Virtual Environments", "abstract": "This paper poses the question: How does the representation of the body in virtual environments affect the mind? This article considers how virtual reality interfaces are evolving to progressively embody the user. The effect of embodiment on the sensation of physical presence, social presence, and self presence in virtual environments is discussed. The effect of avatar representation on body image and body schema distortion is also considered.", "abstracts": [ { "abstractType": "Regular", "content": "This paper poses the question: How does the representation of the body in virtual environments affect the mind? This article considers how virtual reality interfaces are evolving to progressively embody the user. The effect of embodiment on the sensation of physical presence, social presence, and self presence in virtual environments is discussed. The effect of avatar representation on body image and body schema distortion is also considered.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper poses the question: How does the representation of the body in virtual environments affect the mind? This article considers how virtual reality interfaces are evolving to progressively embody the user. The effect of embodiment on the sensation of physical presence, social presence, and self presence in virtual environments is discussed. 
The effect of avatar representation on body image and body schema distortion is also considered.", "fno": "80840012", "keywords": [ "Human Computer Interaction", "Virtual Reality", "Embodiment", "Presence", "Cyborgs" ], "authors": [ { "affiliation": "Media Interface & Network Design (M.I.N.D.) Lab Michigan State University", "fullName": "Frank Biocca", "givenName": "Frank", "surname": "Biocca", "__typename": "ArticleAuthorType" } ], "idPrefix": "ct", "isOpenAccess": false, "showRecommendedArticles": false, "showBuyMe": true, "hasPdf": true, "pubDate": "1997-08-01T00:00:00", "pubType": "proceedings", "pages": "12", "year": "1997", "issn": null, "isbn": "0-8186-8084-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "00617697", "articleId": "1dPo8pkVEBi", "__typename": "AdjacentArticleType" }, "next": { "fno": "80840027", "articleId": "12OmNCdBDVV", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKirv", "title": "2018 IEEE Workshop on Augmented and Virtual Realities for Good (VAR4Good)", "acronym": "var4good", "groupId": "1829024", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "17D45Xtvpep", "doi": "10.1109/VAR4GOOD.2018.8576885", "title": "Degraded Reality: Using VR/AR to simulate visual impairments", "normalizedTitle": "Degraded Reality: Using VR/AR to simulate visual impairments", "abstract": "The effects of eye disease cannot be depicted accurately using traditional media. Consequently, public understanding of eye disease is often poor. We present a VR/AR system for simulating common visual impairments, including disability glare, spatial distortions (Metamorphopsia), the selective blurring and filling-in of information across the visual field, and color vision deficits. Unlike most existing simulators, the simulations are informed by patients' self-reported symptoms, can be quantitatively manipulated to provide custom disease profiles, and support gaze-contingent presentation (i.e., when using a VR/AR headset that contains eye-tracking technology, such as the Fove0). Such a simulator could be used as a teaching/empathy aid, or as a tool for evaluating the accessibility of new products and environments.", "abstracts": [ { "abstractType": "Regular", "content": "The effects of eye disease cannot be depicted accurately using traditional media. Consequently, public understanding of eye disease is often poor. We present a VR/AR system for simulating common visual impairments, including disability glare, spatial distortions (Metamorphopsia), the selective blurring and filling-in of information across the visual field, and color vision deficits. 
Unlike most existing simulators, the simulations are informed by patients' self-reported symptoms, can be quantitatively manipulated to provide custom disease profiles, and support gaze-contingent presentation (i.e., when using a VR/AR headset that contains eye-tracking technology, such as the Fove0). Such a simulator could be used as a teaching/empathy aid, or as a tool for evaluating the accessibility of new products and environments.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The effects of eye disease cannot be depicted accurately using traditional media. Consequently, public understanding of eye disease is often poor. We present a VR/AR system for simulating common visual impairments, including disability glare, spatial distortions (Metamorphopsia), the selective blurring and filling-in of information across the visual field, and color vision deficits. Unlike most existing simulators, the simulations are informed by patients' self-reported symptoms, can be quantitatively manipulated to provide custom disease profiles, and support gaze-contingent presentation (i.e., when using a VR/AR headset that contains eye-tracking technology, such as the Fove0). 
Such a simulator could be used as a teaching/empathy aid, or as a tool for evaluating the accessibility of new products and environments.", "fno": "08576885", "keywords": [ "Augmented Reality", "Digital Simulation", "Diseases", "Eye", "Handicapped Aids", "Vision Defects", "Visual Perception", "Custom Disease Profiles", "Gaze Contingent Presentation", "Eye Tracking Technology", "Degraded Reality", "Eye Disease", "Traditional Media", "Public Understanding", "Simulating Common Visual Impairments", "Disability Glare", "Spatial Distortions", "Selective Blurring", "Visual Field", "Color Vision Deficits", "Existing Simulators", "VR AR System", "VR AR Headset", "Visualization", "Kernel", "Image Color Analysis", "Diseases", "Distortion", "Augmented Reality", "Retina", "Computing Methodologies X 2014 Graphics Systems And Interfaces X 2014 Mixed Augmented Reality", "Computing Methodologies X 2014 Computer Graphics X 2014 Image Manipulation X 2014 Image Processing", "Modeling And Simulation X 2014 Simulation Types And Techniques X 2014 Real Time Simulation" ], "authors": [ { "affiliation": "UCL Institute of Ophthalmology", "fullName": "Pete R. 
Jones", "givenName": "Pete R.", "surname": "Jones", "__typename": "ArticleAuthorType" }, { "affiliation": "City, University of London", "fullName": "Giovanni Ometto", "givenName": "Giovanni", "surname": "Ometto", "__typename": "ArticleAuthorType" } ], "idPrefix": "var4good", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-03-01T00:00:00", "pubType": "proceedings", "pages": "1-4", "year": "2018", "issn": null, "isbn": "978-1-5386-5977-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08576892", "articleId": "17D45Xtvp8L", "__typename": "AdjacentArticleType" }, "next": { "fno": "08576889", "articleId": "17D45XfSET0", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/svr/2012/4725/0/4725a116", "title": "From VR to AR: Adding AR Functionality to an Existing VR Software Framework", "doi": null, "abstractUrl": "/proceedings-article/svr/2012/4725a116/12OmNAYoKsE", "parentPublication": { "id": "proceedings/svr/2012/4725/0", "title": "2012 14th Symposium on Virtual and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2016/3740/0/07836515", "title": "Empower VR Art and AR Book with Spatial Interaction", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836515/12OmNB9t6u0", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2017/6327/0/6327a253", "title": "Workshop on VR and AR meet creative industries", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a253/12OmNylKASp", "parentPublication": { "id": 
"proceedings/ismar-adjunct/2017/6327/0", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2008/1971/0/04480774", "title": "VARU Framework: Enabling Rapid Prototyping of VR, AR and Ubiquitous Applications", "doi": null, "abstractUrl": "/proceedings-article/vr/2008/04480774/12OmNzC5Tgt", "parentPublication": { "id": "proceedings/vr/2008/1971/0", "title": "IEEE Virtual Reality 2008", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/11/08007246", "title": "AR Feels &#x201c;Softer&#x201d; than VR: Haptic Perception of Stiffness in Augmented versus Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2017/11/08007246/13rRUwh80Hj", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699200", "title": "Effective Free Field of View Scene Exploration in VR and AR", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699200/19F1SrRS4vK", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798148", "title": "CAVE-AR: A VR Authoring System to Interactively Design, Simulate, and Debug Multi-user AR Experiences", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798148/1cJ0FRS6rjG", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/11/08794584", 
"title": "Towards a Switchable AR/VR Near-eye Display with Accommodation-Vergence and Eyeglass Prescription Support", "doi": null, "abstractUrl": "/journal/tg/2019/11/08794584/1dNHlOrNW5W", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09199574", "title": "Gaze-Dependent Simulation of Light Perception in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2020/12/09199574/1ncgnMqzLJm", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a599", "title": "CDVVAR: VR/AR Collaborative Data Visualization Tool", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a599/1tnXiU5GF9K", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1J7W6LmbCw0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "9973799", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1J7WfSClY5y", "doi": "10.1109/ISMAR-Adjunct57072.2022.00124", "title": "Minimising the duration of a system-controlled virtual reality tour", "normalizedTitle": "Minimising the duration of a system-controlled virtual reality tour", "abstract": "The duration of a virtual reality tour can be a crucial factor in affecting user experience. Lengthy exposure to a virtual environment can cause severe health issues similar to motion sickness, and wearing the Head-Mounted Display for a long duration can make the user fatigued. Minimising the duration of VR exposure can always help in addressing these issues. In this work, we have proposed an approach to minimise the duration of a system-controlled virtual tour by optimising the path connecting all the sites of the virtual environment. To optimise the duration, we theoretically compute the optimal time and the path to cover all the places of a virtual environment by reducing the problem to the famous Vehicle Routing Problem (VRP). Following our approach, we have also created a VR tour for the largest river island in the world, Majuli (Assam, India).", "abstracts": [ { "abstractType": "Regular", "content": "The duration of a virtual reality tour can be a crucial factor in affecting user experience. Lengthy exposure to a virtual environment can cause severe health issues similar to motion sickness, and wearing the Head-Mounted Display for a long duration can make the user fatigued. Minimising the duration of VR exposure can always help in addressing these issues. 
In this work, we have proposed an approach to minimise the duration of a system-controlled virtual tour by optimising the path connecting all the sites of the virtual environment. To optimise the duration, we theoretically compute the optimal time and the path to cover all the places of a virtual environment by reducing the problem to the famous Vehicle Routing Problem (VRP). Following our approach, we have also created a VR tour for the largest river island in the world, Majuli (Assam, India).", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The duration of a virtual reality tour can be a crucial factor in affecting user experience. Lengthy exposure to a virtual environment can cause severe health issues similar to motion sickness, and wearing the Head-Mounted Display for a long duration can make the user fatigued. Minimising the duration of VR exposure can always help in addressing these issues. In this work, we have proposed an approach to minimise the duration of a system-controlled virtual tour by optimising the path connecting all the sites of the virtual environment. To optimise the duration, we theoretically compute the optimal time and the path to cover all the places of a virtual environment by reducing the problem to the famous Vehicle Routing Problem (VRP). 
Following our approach, we have also created a VR tour for the largest river island in the world, Majuli (Assam, India).", "fno": "536500a600", "keywords": [ "Health Hazards", "Helmet Mounted Displays", "Human Factors", "Optimisation", "Rivers", "Virtual Reality", "Head Mounted Display", "Lengthy Exposure", "Severe Health Issues", "System Controlled Virtual Reality Tour", "System Controlled Virtual Tour", "User Experience", "Virtual Environment", "VR Exposure", "VR Tour", "Head Mounted Displays", "Vehicle Routing", "Virtual Environments", "Motion Sickness", "Rivers", "Augmented Reality", "Virtual Reality Virtual Reality Tour VR Sickness Duration", "Virtual Reality Virtual Tour Length Optimal Virtual Tour" ], "authors": [ { "affiliation": "Indian Institute of Technology Guwahati", "fullName": "Nilotpal Biswas", "givenName": "Nilotpal", "surname": "Biswas", "__typename": "ArticleAuthorType" }, { "affiliation": "Indian Institute of Technology Guwahati", "fullName": "Debangshu Banerjee", "givenName": "Debangshu", "surname": "Banerjee", "__typename": "ArticleAuthorType" }, { "affiliation": "Indian Institute of Technology Guwahati", "fullName": "Samit Bhattacharya", "givenName": "Samit", "surname": "Bhattacharya", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-10-01T00:00:00", "pubType": "proceedings", "pages": "600-604", "year": "2022", "issn": null, "isbn": "978-1-6654-5365-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "536500a594", "articleId": "1J7WjkWVarS", "__typename": "AdjacentArticleType" }, "next": { "fno": "536500a605", "articleId": "1J7WeVJrh5K", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2006/0224/0/02240287", "title": "Vertical Vergence Calibration for Augmented 
Reality Displays", "doi": null, "abstractUrl": "/proceedings-article/vr/2006/02240287/12OmNwErpUc", "parentPublication": { "id": "proceedings/vr/2006/0224/0", "title": "IEEE Virtual Reality Conference (VR 2006)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a542", "title": "Resolution Tradeoff in Gameplay Experience, Performance, and Simulator Sickness in Virtual Reality Games", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a542/1CJcAVYrJew", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a840", "title": "A Location-Triggered Augmented Reality Walking Tour Using Snap Spectacles 2021", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a840/1CJdGu5zAoE", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a533", "title": "Getting the Most out of Virtual Reality: Evaluating Short Breaks to Reduce Cybersickness and Cognitive Aftereffects", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a533/1CJfa6K7KXm", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a013", "title": "The Impact of Non-immersive Virtual Reality Technologies on Consumers&#x0027; Behaviors in real estate: A Website&#x0027;s Perspective", "doi": null, "abstractUrl": 
"/proceedings-article/ismar-adjunct/2022/536500a013/1J7W7B41hyE", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2020/03/08937822", "title": "Design of a Virtual Reality Tour System for People With Intellectual and Developmental Disabilities: A Case Study", "doi": null, "abstractUrl": "/magazine/cs/2020/03/08937822/1fUSRHSldhm", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089604", "title": "Examining Whether Secondary Effects of Temperature-Associated Virtual Stimuli Influence Subjective Perception of Duration", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089604/1jIxg8GOOUo", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090433", "title": "Virtual Tour: An Immersive Low Cost Telepresence System", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090433/1jIxrSY8cZa", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09199574", "title": "Gaze-Dependent Simulation of Light Perception in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2020/12/09199574/1ncgnMqzLJm", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a344", "title": "Exploration of Hands-free Text Entry Techniques For Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a344/1pysyrYBX5C", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1jIx7fmpQ9a", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1jIxf3ZEs0w", "doi": "10.1109/VR46266.2020.00113", "title": "Glanceable AR: Evaluating Information Access Methods for Head-Worn Augmented Reality", "normalizedTitle": "Glanceable AR: Evaluating Information Access Methods for Head-Worn Augmented Reality", "abstract": "Augmented reality head-worn displays (AR HWDs) have the potential to assist personal computing and the acquisition of everyday information. In this research, we propose Glanceable AR, an interaction paradigm for accessing information in AR HWDs. In Glanceable AR, secondary information resides at the periphery of vision to stay unobtrusive and can be accessed by a quick glance whenever needed. We propose two novel hands-free interfaces: \"head-glance\", in which virtual contents are fixed to the user&#x2019;s body and can be accessed by head rotation, and \"gaze-summon\" in which contents can be \"summoned\" into central vision by eye-tracked gazing at the periphery. We compared these techniques with a baseline heads-up display (HUD), which we call \"eye-glance\" interface in two dual-task scenarios. We found that the head-glance and eye-glance interfaces are more preferred and more efficient than the gaze-summon interface for discretionary information access. For a continuous monitoring task, the eye-glance interface was preferred. We discuss the implications of our findings for designing Glanceable AR interfaces in AR HWDs.", "abstracts": [ { "abstractType": "Regular", "content": "Augmented reality head-worn displays (AR HWDs) have the potential to assist personal computing and the acquisition of everyday information. In this research, we propose Glanceable AR, an interaction paradigm for accessing information in AR HWDs. 
In Glanceable AR, secondary information resides at the periphery of vision to stay unobtrusive and can be accessed by a quick glance whenever needed. We propose two novel hands-free interfaces: \"head-glance\", in which virtual contents are fixed to the user&#x2019;s body and can be accessed by head rotation, and \"gaze-summon\" in which contents can be \"summoned\" into central vision by eye-tracked gazing at the periphery. We compared these techniques with a baseline heads-up display (HUD), which we call \"eye-glance\" interface in two dual-task scenarios. We found that the head-glance and eye-glance interfaces are more preferred and more efficient than the gaze-summon interface for discretionary information access. For a continuous monitoring task, the eye-glance interface was preferred. We discuss the implications of our findings for designing Glanceable AR interfaces in AR HWDs.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Augmented reality head-worn displays (AR HWDs) have the potential to assist personal computing and the acquisition of everyday information. In this research, we propose Glanceable AR, an interaction paradigm for accessing information in AR HWDs. In Glanceable AR, secondary information resides at the periphery of vision to stay unobtrusive and can be accessed by a quick glance whenever needed. We propose two novel hands-free interfaces: \"head-glance\", in which virtual contents are fixed to the user’s body and can be accessed by head rotation, and \"gaze-summon\" in which contents can be \"summoned\" into central vision by eye-tracked gazing at the periphery. We compared these techniques with a baseline heads-up display (HUD), which we call \"eye-glance\" interface in two dual-task scenarios. We found that the head-glance and eye-glance interfaces are more preferred and more efficient than the gaze-summon interface for discretionary information access. 
For a continuous monitoring task, the eye-glance interface was preferred. We discuss the implications of our findings for designing Glanceable AR interfaces in AR HWDs.", "fno": "09089433", "keywords": [ "Augmented Reality", "User Interfaces", "Human Computer Interaction", "Head Mounted Displays", "Human Centered Computing", "Mixed Augmented Reality", "Human Centered Computing", "User Interface Design" ], "authors": [ { "affiliation": "Virginia Tech,Center for Human-Computer Interaction,Blacksburg,VA,United States", "fullName": "Feiyu Lu", "givenName": "Feiyu", "surname": "Lu", "__typename": "ArticleAuthorType" }, { "affiliation": "Virginia Tech,Center for Human-Computer Interaction,Blacksburg,VA,United States", "fullName": "Shakiba Davari", "givenName": "Shakiba", "surname": "Davari", "__typename": "ArticleAuthorType" }, { "affiliation": "Virginia Tech,Center for Human-Computer Interaction,Blacksburg,VA,United States", "fullName": "Lee Lisle", "givenName": "Lee", "surname": "Lisle", "__typename": "ArticleAuthorType" }, { "affiliation": "Virginia Tech,Center for Human-Computer Interaction,Blacksburg,VA,United States", "fullName": "Yuan Li", "givenName": "Yuan", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": "Virginia Tech,Center for Human-Computer Interaction,Blacksburg,VA,United States", "fullName": "Doug A. 
Bowman", "givenName": "Doug A.", "surname": "Bowman", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-03-01T00:00:00", "pubType": "proceedings", "pages": "930-939", "year": "2020", "issn": null, "isbn": "978-1-7281-5608-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09089552", "articleId": "1jIx8sfGbSw", "__typename": "AdjacentArticleType" }, "next": { "fno": "09089540", "articleId": "1jIxarbH6AU", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2013/2869/0/06671800", "title": "Subtle cueing for visual search in head-tracked head worn displays", "doi": null, "abstractUrl": "/proceedings-article/ismar/2013/06671800/12OmNylbovt", "parentPublication": { "id": "proceedings/ismar/2013/2869/0", "title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2008/3268/0/3268a503", "title": "Optimal Font Size for Head-Mounted-Displays in Outdoor Applications", "doi": null, "abstractUrl": "/proceedings-article/iv/2008/3268a503/12OmNzd7bBd", "parentPublication": { "id": "proceedings/iv/2008/3268/0", "title": "2008 12th International Conference Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a876", "title": "HoloCMDS: Investigating Around Field of View Glanceable Commands Selection in AR-HMDs", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a876/1CJdZ8RwdnG", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a285", "title": "Birds vs. Fish: Visualizing Out-of-View Objects in Augmented Reality using 3D Minimaps", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a285/1gysmdpyM3C", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090631", "title": "Framing the Scene: An Examination of Augmented Reality Head Worn Displays in Construction Assembly Tasks", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090631/1jIxyGx0KXK", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09199574", "title": "Gaze-Dependent Simulation of Light Perception in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2020/12/09199574/1ncgnMqzLJm", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/07/09253561", "title": "AR-Loupe: Magnified Augmented Reality by Combining an Optical See-Through Head-Mounted Display and a Loupe", "doi": null, "abstractUrl": "/journal/tg/2022/07/09253561/1oDXHeBJHNe", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a768", "title": "Evaluating the Potential of Glanceable AR Interfaces for Authentic Everyday Uses", "doi": null, "abstractUrl": 
"/proceedings-article/vr/2021/255600a768/1tuAQLvc5WM", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800a026", "title": "Designing Historical Tours for Head-Worn AR", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a026/1yeQLJO3TNu", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800a159", "title": "Exploring the Effect of Visual Cues on Eye Gaze During AR-Guided Picking and Assembly Tasks", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a159/1yeQM18rD7G", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1jIx7fmpQ9a", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1jIxg8GOOUo", "doi": "10.1109/VR46266.2020.00070", "title": "Examining Whether Secondary Effects of Temperature-Associated Virtual Stimuli Influence Subjective Perception of Duration", "normalizedTitle": "Examining Whether Secondary Effects of Temperature-Associated Virtual Stimuli Influence Subjective Perception of Duration", "abstract": "Past work in augmented reality has shown that temperature-associated AR stimuli can induce warming and cooling sensations in the user, and prior work in psychology suggests that a person&#x2019;s body temperature can influence that person&#x2019;s sense of subjective perception of duration. In this paper, we present a user study to evaluate the relationship between temperature-associated virtual stimuli presented on an AR-HMD and the user&#x2019;s sense of subjective perception of duration and temperature. In particular, we investigate two independent variables: the apparent temperature of the virtual stimuli presented to the participant, which could be hot or cold, and the location of the stimuli, which could be in direct contact with the user, in indirect contact with the user, or both in direct and indirect contact simultaneously. We investigate how these variables affect the users&#x2019; perception of duration and perception of body and environment temperature by having participants make prospective time estimations while observing the virtual stimulus and answering subjective questions regarding their body and environment temperatures. 
Our work confirms that temperature-associated virtual stimuli are capable of having significant effects on the users&#x2019; perception of temperature, and highlights a possible limitation in the current augmented reality technology in that no secondary effects on the users&#x2019; perception of duration were observed.", "abstracts": [ { "abstractType": "Regular", "content": "Past work in augmented reality has shown that temperature-associated AR stimuli can induce warming and cooling sensations in the user, and prior work in psychology suggests that a person&#x2019;s body temperature can influence that person&#x2019;s sense of subjective perception of duration. In this paper, we present a user study to evaluate the relationship between temperature-associated virtual stimuli presented on an AR-HMD and the user&#x2019;s sense of subjective perception of duration and temperature. In particular, we investigate two independent variables: the apparent temperature of the virtual stimuli presented to the participant, which could be hot or cold, and the location of the stimuli, which could be in direct contact with the user, in indirect contact with the user, or both in direct and indirect contact simultaneously. We investigate how these variables affect the users&#x2019; perception of duration and perception of body and environment temperature by having participants make prospective time estimations while observing the virtual stimulus and answering subjective questions regarding their body and environment temperatures. 
Our work confirms that temperature-associated virtual stimuli are capable of having significant effects on the users&#x2019; perception of temperature, and highlights a possible limitation in the current augmented reality technology in that no secondary effects on the users&#x2019; perception of duration were observed.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Past work in augmented reality has shown that temperature-associated AR stimuli can induce warming and cooling sensations in the user, and prior work in psychology suggests that a person’s body temperature can influence that person’s sense of subjective perception of duration. In this paper, we present a user study to evaluate the relationship between temperature-associated virtual stimuli presented on an AR-HMD and the user’s sense of subjective perception of duration and temperature. In particular, we investigate two independent variables: the apparent temperature of the virtual stimuli presented to the participant, which could be hot or cold, and the location of the stimuli, which could be in direct contact with the user, in indirect contact with the user, or both in direct and indirect contact simultaneously. We investigate how these variables affect the users’ perception of duration and perception of body and environment temperature by having participants make prospective time estimations while observing the virtual stimulus and answering subjective questions regarding their body and environment temperatures. 
Our work confirms that temperature-associated virtual stimuli are capable of having significant effects on the users’ perception of temperature, and highlights a possible limitation in the current augmented reality technology in that no secondary effects on the users’ perception of duration were observed.", "fno": "09089604", "keywords": [ "Psychology", "Augmented Reality", "Head Mounted Displays", "Virtual Reality", "Computer Graphics", "Graphics Systems And Interfaces", "Virtual Reality", "Human Centered Computing", "Interaction Paradigms", "Virtual Reality" ], "authors": [ { "affiliation": "University of Central Florida", "fullName": "Austin Erickson", "givenName": "Austin", "surname": "Erickson", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Central Florida", "fullName": "Gerd Bruder", "givenName": "Gerd", "surname": "Bruder", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Central Florida", "fullName": "Pamela J. Wisniewski", "givenName": "Pamela J.", "surname": "Wisniewski", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Central Florida", "fullName": "Gregory F. 
Welch", "givenName": "Gregory F.", "surname": "Welch", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-03-01T00:00:00", "pubType": "proceedings", "pages": "493-499", "year": "2020", "issn": null, "isbn": "978-1-7281-5608-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09089482", "articleId": "1jIxeHgBEkg", "__typename": "AdjacentArticleType" }, "next": { "fno": "09089537", "articleId": "1jIxdgX5pdK", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/acii/2017/0563/0/08273612", "title": "Emotional responses of vibrotactile-thermal stimuli: Effects of constant-temperature thermal stimuli", "doi": null, "abstractUrl": "/proceedings-article/acii/2017/08273612/12OmNqMPfQu", "parentPublication": { "id": "proceedings/acii/2017/0563/0", "title": "2017 Seventh International Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a600", "title": "Minimising the duration of a system-controlled virtual reality tour", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a600/1J7WfSClY5y", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a074", "title": "An Exploration of Hands-free Text Selection for Virtual Reality Head-Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a074/1JrRaeV82L6", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 
IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a001", "title": "Estimating the Just Noticeable Difference of Tactile Feedback in Oculus Quest 2 Controllers", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a001/1JrRdMd6OZi", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2022/03/09124686", "title": "Stimulus Sampling With 360-Videos: Examining Head Movements, Arousal, Presence, Simulator Sickness, and Preference on a Large Sample of Participants and Videos", "doi": null, "abstractUrl": "/journal/ta/2022/03/09124686/1kVbwGkgqYg", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09199574", "title": "Gaze-Dependent Simulation of Light Perception in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2020/12/09199574/1ncgnMqzLJm", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a344", "title": "Exploration of Hands-free Text Entry Techniques For Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a344/1pysyrYBX5C", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2020/0497/0/049700a042", "title": "Rating Duration Analysis for Subjective Quality 
Assessment of 360&#x00B0; Videos", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2020/049700a042/1vg7TpMdSH6", "parentPublication": { "id": "proceedings/icvrv/2020/0497/0", "title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800a465", "title": "Amplifying Realities: Gradual and Seamless Scaling of Visual and Auditory Stimuli in Extended Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a465/1yeQA0ONooU", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800a352", "title": "Heat Pain Threshold Modulation By Experiencing Burning Hands in Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a352/1yeQKcrGZvG", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1tnWwqMuCzu", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "acronym": "vrw", "groupId": "1836626", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1tnXL2XEOw8", "doi": "10.1109/VRW52623.2021.00112", "title": "VR-Phore: A Novel Virtual Reality system for Diagnosis of Binocular Vision", "normalizedTitle": "VR-Phore: A Novel Virtual Reality system for Diagnosis of Binocular Vision", "abstract": "Binocular vision (BV) is the result of fusion between inputs from each eye to form a coherent image. BV anomalies are evaluated using different diagnostic tests and instruments. One such instrument is the Synoptophore, which evaluates three grades of BV. This equipment though efficient has certain limitations like ambient light while testing, bulky and expensive. We propose VR-Phore, application of a VR head-mounted display for diagnostics based on principle of the haploscope similar to Synoptophore. The proposed system addresses the limitations of Synoptophore with added advantage of a software platform to incorporate testing modules for a range of clinical conditions.", "abstracts": [ { "abstractType": "Regular", "content": "Binocular vision (BV) is the result of fusion between inputs from each eye to form a coherent image. BV anomalies are evaluated using different diagnostic tests and instruments. One such instrument is the Synoptophore, which evaluates three grades of BV. This equipment though efficient has certain limitations like ambient light while testing, bulky and expensive. We propose VR-Phore, application of a VR head-mounted display for diagnostics based on principle of the haploscope similar to Synoptophore. 
The proposed system addresses the limitations of Synoptophore with added advantage of a software platform to incorporate testing modules for a range of clinical conditions.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Binocular vision (BV) is the result of fusion between inputs from each eye to form a coherent image. BV anomalies are evaluated using different diagnostic tests and instruments. One such instrument is the Synoptophore, which evaluates three grades of BV. This equipment though efficient has certain limitations like ambient light while testing, bulky and expensive. We propose VR-Phore, application of a VR head-mounted display for diagnostics based on principle of the haploscope similar to Synoptophore. The proposed system addresses the limitations of Synoptophore with added advantage of a software platform to incorporate testing modules for a range of clinical conditions.", "fno": "405700a460", "keywords": [ "Helmet Mounted Displays", "Medical Diagnostic Computing", "Patient Diagnosis", "Virtual Reality", "Vision Defects", "VR Phore", "Virtual Reality System", "Eye", "Coherent Image", "BV Anomalies", "Synoptophore", "VR Head Mounted Display", "Binocular Vision Diagnosis", "Haploscope", "Software Platform", "Clinical Conditions", "Three Dimensional Displays", "Head Mounted Displays", "Instruments", "Conferences", "Virtual Reality", "Resists", "User Interfaces", "Virtual Reality", "Synoptophore", "Binocular Vision", "Health Clinical Diagnosis" ], "authors": [ { "affiliation": "IIIT,Hyderabad", "fullName": "Sai Srinvas Vuddagiri", "givenName": "Sai Srinvas", "surname": "Vuddagiri", "__typename": "ArticleAuthorType" }, { "affiliation": "IIIT,Hyderabad", "fullName": "Kavita Vemuri", "givenName": "Kavita", "surname": "Vemuri", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Hyderabad", "fullName": "Male Shivaram", "givenName": "Male", "surname": "Shivaram", "__typename": "ArticleAuthorType" }, { "affiliation": 
"University of Hyderabad", "fullName": "Rishi Bhardwaj", "givenName": "Rishi", "surname": "Bhardwaj", "__typename": "ArticleAuthorType" } ], "idPrefix": "vrw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-03-01T00:00:00", "pubType": "proceedings", "pages": "460-461", "year": "2021", "issn": null, "isbn": "978-1-6654-4057-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "405700a458", "articleId": "1tnWZju755K", "__typename": "AdjacentArticleType" }, "next": { "fno": "405700a462", "articleId": "1tnWZyFmWpG", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "trans/tg/5555/01/09850416", "title": "Distance Perception in Virtual Reality: A Meta-Analysis of the Effect of Head-Mounted Display Characteristics", "doi": null, "abstractUrl": "/journal/tg/5555/01/09850416/1Fz4SPLVTMY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a749", "title": "Supporting Multi-User Co-located Training for Industrial Procedures through Immersive Virtual Reality (VR) and a Large-scale Display", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a749/1J7WrIn9BJu", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a287", "title": "Using HMD-based Hand Tracking Virtual Reality in Canine Anatomy Summative Assessment: a User Study", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a287/1JrRbaENq6I", "parentPublication": { 
"id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a344", "title": "Exploration of Hands-free Text Entry Techniques For Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a344/1pysyrYBX5C", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a432", "title": "Virtual Reality in transit: how acceptable is VR use on public transport?", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a432/1tnXPIz34pa", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a542", "title": "Field of View Effect on Distance Perception in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a542/1tnXQ9aew80", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a345", "title": "Spherical World in Miniature: Exploring the Tiny Planets Metaphor for Discrete Locomotion in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2021/255600a345/1tuAuPBgHTi", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800a118", "title": "Exploring Head-based Mode-Switching in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2021/015800a118/1yeD1RhEseY", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800a133", "title": "Learning to Perceive: Perceptual Resolution Enhancement for VR Display with Efficient Neural Network Processing", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a133/1yeQzHQNgvC", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2021/3225/0/322500a242", "title": "Investigating the Affective State of VR HMD User When Watching Videos Displayed in Different Formats", "doi": null, "abstractUrl": "/proceedings-article/aivr/2021/322500a242/1zxLyEF5yRW", "parentPublication": { "id": "proceedings/aivr/2021/3225/0", "title": "2021 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxV4itF", "title": "2017 IEEE Virtual Reality (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNAq3hBL", "doi": "10.1109/VR.2017.7892332", "title": "Exploring non-reversing magic mirrors for screen-based augmented reality systems", "normalizedTitle": "Exploring non-reversing magic mirrors for screen-based augmented reality systems", "abstract": "Screen-based Augmented Reality (AR) systems can be built as a window into the real world as often done in mobile AR applications or using the Magic Mirror metaphor, where users can see themselves with augmented graphics on a large display. The term Magic Mirror implies that the display shows the users enantiomorph, i.e. the mirror image, such that the system mimics a real-world physical mirror. However, the question arises whether one should design a traditional mirror, or instead display the true mirror image by means of a non-reversing mirror? We discuss the perceptual differences between these two mirror visualization concepts and present a first comparative study in the context of Magic Mirror anatomy teaching.", "abstracts": [ { "abstractType": "Regular", "content": "Screen-based Augmented Reality (AR) systems can be built as a window into the real world as often done in mobile AR applications or using the Magic Mirror metaphor, where users can see themselves with augmented graphics on a large display. The term Magic Mirror implies that the display shows the users enantiomorph, i.e. the mirror image, such that the system mimics a real-world physical mirror. However, the question arises whether one should design a traditional mirror, or instead display the true mirror image by means of a non-reversing mirror? 
We discuss the perceptual differences between these two mirror visualization concepts and present a first comparative study in the context of Magic Mirror anatomy teaching.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Screen-based Augmented Reality (AR) systems can be built as a window into the real world as often done in mobile AR applications or using the Magic Mirror metaphor, where users can see themselves with augmented graphics on a large display. The term Magic Mirror implies that the display shows the users enantiomorph, i.e. the mirror image, such that the system mimics a real-world physical mirror. However, the question arises whether one should design a traditional mirror, or instead display the true mirror image by means of a non-reversing mirror? We discuss the perceptual differences between these two mirror visualization concepts and present a first comparative study in the context of Magic Mirror anatomy teaching.", "fno": "07892332", "keywords": [ "Mirrors", "Augmented Reality", "Education", "Biomedical Imaging", "Psychology", "Context", "H 5 1 Information Interfaces And Presentation Multimedia Information Systems Artificial Augmented And Virtual Realities", "H 5 2 Information Interfaces And Presentation User Interfaces Ergonomics" ], "authors": [ { "affiliation": "Technische Universität München, Munich, Germany", "fullName": "Felix Bork", "givenName": "Felix", "surname": "Bork", "__typename": "ArticleAuthorType" }, { "affiliation": "Johns Hopkins University, Baltimore MD, United States", "fullName": "Roghayeh Barmaki", "givenName": "Roghayeh", "surname": "Barmaki", "__typename": "ArticleAuthorType" }, { "affiliation": "Technische Universität München, Munich Germany", "fullName": "Ulrich Eck", "givenName": "Ulrich", "surname": "Eck", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Ottawa, Ottawa Canada", "fullName": "Pascal Fallavolita", "givenName": "Pascal", "surname": "Fallavolita", "__typename": 
"ArticleAuthorType" }, { "affiliation": "Johns Hopkins University, Baltimore MD, United States", "fullName": "Bernhard Fuerst", "givenName": "Bernhard", "surname": "Fuerst", "__typename": "ArticleAuthorType" }, { "affiliation": "Technische Universität München, Munich Germany, Johns Hopkins University, Baltimore MD, United States", "fullName": "Nassir Navab", "givenName": "Nassir", "surname": "Navab", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-01-01T00:00:00", "pubType": "proceedings", "pages": "373-374", "year": "2017", "issn": "2375-5334", "isbn": "978-1-5090-6647-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07892331", "articleId": "12OmNvk7JO0", "__typename": "AdjacentArticleType" }, "next": { "fno": "07892333", "articleId": "12OmNAXxX2a", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2005/8929/0/01492775", "title": "Realistic occlusion effects in mirror-based co-located augmented reality systems", "doi": null, "abstractUrl": "/proceedings-article/vr/2005/01492775/12OmNAYoKpz", "parentPublication": { "id": "proceedings/vr/2005/8929/0", "title": "IEEE Virtual Reality 2005", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2013/2869/0/06671784", "title": "See-through window vs. 
magic mirror: A comparison in supporting visual-motor tasks", "doi": null, "abstractUrl": "/proceedings-article/ismar/2013/06671784/12OmNAoUTa9", "parentPublication": { "id": "proceedings/ismar/2013/2869/0", "title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504720", "title": "Depth perception in mirrors: The effects of video-based augmented reality in driver's side view mirrors", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504720/12OmNBqdr66", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2013/2869/0/06671803", "title": "Kinect for interactive AR anatomy learning", "doi": null, "abstractUrl": "/proceedings-article/ismar/2013/06671803/12OmNCd2rmp", "parentPublication": { "id": "proceedings/ismar/2013/2869/0", "title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2017/2943/0/2943a169", "title": "Empirical Study of Non-Reversing Magic Mirrors for Augmented Reality Anatomy Learning", "doi": null, "abstractUrl": "/proceedings-article/ismar/2017/2943a169/12OmNyprnqS", "parentPublication": { "id": "proceedings/ismar/2017/2943/0", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2012/4660/0/06402557", "title": "A hand-held AR magic lens with user-perspective rendering", "doi": null, "abstractUrl": "/proceedings-article/ismar/2012/06402557/12OmNz5s0SW", "parentPublication": { "id": "proceedings/ismar/2012/4660/0", "title": "2012 IEEE International Symposium on Mixed and 
Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2007/0905/0/04161035", "title": "Magic Mirror System with Hand-held and Wearable Augmentations", "doi": null, "abstractUrl": "/proceedings-article/vr/2007/04161035/12OmNzgwmIH", "parentPublication": { "id": "proceedings/vr/2007/0905/0", "title": "2007 IEEE Virtual Reality Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2013/01/mcg2013010012", "title": "Magic Cards: A New Augmented-Reality Approach", "doi": null, "abstractUrl": "/magazine/cg/2013/01/mcg2013010012/13rRUxBa5hz", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2006/04/mcg2006040064", "title": "Magic Lenses for Augmented Virtual Environments", "doi": null, "abstractUrl": "/magazine/cg/2006/04/mcg2006040064/13rRUyZaxsW", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a217", "title": "Augmented Mirrors", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a217/1pysxrHE5Q4", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNviZlH1", "title": "2013 International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "acronym": "sitis", "groupId": "1002425", "volume": "0", "displayVolume": "0", "year": "2013", "__typename": "ProceedingType" }, "article": { "id": "12OmNwpXRVO", "doi": "10.1109/SITIS.2013.69", "title": "Making a Hands-On Display with Augmented Reality Work at a Science Museum", "normalizedTitle": "Making a Hands-On Display with Augmented Reality Work at a Science Museum", "abstract": "In this paper, we propose an augmented reality (AR) system with a laser projection device as a hands-on display at a science museum. The AR system provides virtual information, which learners can control for a visual explanation about an exhibited item. Learners develop their knowledge and understanding through the display without any modification to the item and/or the existing displayed explanation. We conducted an experiment using the AR system, with child visitors to Gamagori Museum of Earth, Life and the Sea. AR systems should meet the following criteria if they are to be considered effective: i) the display should make learners interested in the exhibited item, ii) learners should be able to easily handle the AR display, and iii) learners should construct their knowledge through using the display. In our experiment, we evaluated the AR display against these criteria. We conclude that the AR display system enables learners to construct their knowledge in reality, and that the system encourages learners' interest in the exhibited items.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we propose an augmented reality (AR) system with a laser projection device as a hands-on display at a science museum. The AR system provides virtual information, which learners can control for a visual explanation about an exhibited item. 
Learners develop their knowledge and understanding through the display without any modification to the item and/or the existing displayed explanation. We conducted an experiment using the AR system, with child visitors to Gamagori Museum of Earth, Life and the Sea. AR systems should meet the following criteria if they are to be considered effective: i) the display should make learners interested in the exhibited item, ii) learners should be able to easily handle the AR display, and iii) learners should construct their knowledge through using the display. In our experiment, we evaluated the AR display against these criteria. We conclude that the AR display system enables learners to construct their knowledge in reality, and that the system encourages learners' interest in the exhibited items.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we propose an augmented reality (AR) system with a laser projection device as a hands-on display at a science museum. The AR system provides virtual information, which learners can control for a visual explanation about an exhibited item. Learners develop their knowledge and understanding through the display without any modification to the item and/or the existing displayed explanation. We conducted an experiment using the AR system, with child visitors to Gamagori Museum of Earth, Life and the Sea. AR systems should meet the following criteria if they are to be considered effective: i) the display should make learners interested in the exhibited item, ii) learners should be able to easily handle the AR display, and iii) learners should construct their knowledge through using the display. In our experiment, we evaluated the AR display against these criteria. 
We conclude that the AR display system enables learners to construct their knowledge in reality, and that the system encourages learners' interest in the exhibited items.", "fno": "3211a385", "keywords": [ "Whales", "Laser Applications", "Interviews", "Teeth", "Augmented Reality", "Education", "Augmented Reality", "Hands On Display", "Museum" ], "authors": [ { "affiliation": null, "fullName": "Toru B. Takahashi", "givenName": "Toru B.", "surname": "Takahashi", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Satoshi Takahashi", "givenName": "Satoshi", "surname": "Takahashi", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Fusako Kusunoki", "givenName": "Fusako", "surname": "Kusunoki", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Takao Terano", "givenName": "Takao", "surname": "Terano", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Shigenori Inagaki", "givenName": "Shigenori", "surname": "Inagaki", "__typename": "ArticleAuthorType" } ], "idPrefix": "sitis", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2013-12-01T00:00:00", "pubType": "proceedings", "pages": "385-390", "year": "2013", "issn": null, "isbn": "978-1-4799-3211-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3211a378", "articleId": "12OmNxYL5gS", "__typename": "AdjacentArticleType" }, "next": { "fno": "3211a391", "articleId": "12OmNqEjhYP", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icat/2007/3056/0/30560055", "title": "Volumetric Display for Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/icat/2007/30560055/12OmNBCqbJu", "parentPublication": { "id": "proceedings/icat/2007/3056/0", "title": "17th International Conference on Artificial Reality and Telexistence 
(ICAT 2007)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/Ismar-mashd/2015/9628/0/9628a001", "title": "Using Augmented Reality to Promote Homogeneity in Learning Achievement", "doi": null, "abstractUrl": "/proceedings-article/Ismar-mashd/2015/9628a001/12OmNwc3wu8", "parentPublication": { "id": "proceedings/Ismar-mashd/2015/9628/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality - Media, Art, Social Science, Humanities and Design (ISMAR-MASH'D)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/digitel/2008/3409/0/3409a215", "title": "Pedagogy Play: Virtual Instructors for Wearable Augmented Reality during Hands-On Learning and Play", "doi": null, "abstractUrl": "/proceedings-article/digitel/2008/3409a215/12OmNxXCGKF", "parentPublication": { "id": "proceedings/digitel/2008/3409/0", "title": "Digital Game and Intelligent Toy Enhanced Learning, IEEE International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2013/2246/0/2246a387", "title": "Seamless Annotation Display for Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/cw/2013/2246a387/12OmNzkuKyK", "parentPublication": { "id": "proceedings/cw/2013/2246/0", "title": "2013 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcc-dss-smartcity-dependsys/2021/9457/0/945700c344", "title": "The Application of Augmented Reality Technology in Museum Display Design", "doi": null, "abstractUrl": "/proceedings-article/hpcc-dss-smartcity-dependsys/2021/945700c344/1DND5QhoUzm", "parentPublication": { "id": "proceedings/hpcc-dss-smartcity-dependsys/2021/9457/0", "title": "2021 IEEE 23rd Int Conf on High Performance Computing & Communications; 7th Int Conf on Data Science & Systems; 19th Int Conf on Smart City; 7th 
Int Conf on Dependability in Sensor, Cloud & Big Data Systems & Application (HPCC/DSS/SmartCity/DependSys)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10051634", "title": "ImTooth: Neural Implicit Tooth for Dental Augmented Reality", "doi": null, "abstractUrl": "/journal/tg/2023/05/10051634/1L03a1rPCCY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/05/08998139", "title": "Factored Occlusion: Single Spatial Light Modulator Occlusion-capable Optical See-through Augmented Reality Display", "doi": null, "abstractUrl": "/journal/tg/2020/05/08998139/1hrXe0Hbv0I", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09211732", "title": "Comparison of Augmented Reality Display Techniques to Support Medical Needle Insertion", "doi": null, "abstractUrl": "/journal/tg/2020/12/09211732/1nB9X7YX7eU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2020/7675/0/767500a295", "title": "Using Augmented Reality to Explore Museum Artifacts", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a295/1pBMgKgABdC", "parentPublication": { "id": "proceedings/ismar-adjunct/2020/7675/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a569", "title": "Investigation of Microcirculatory Effects of Experiencing Burning Hands in Augmented Reality", 
"doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a569/1tnXxLHfCOQ", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyjLoRw", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNx2zjyh", "doi": "10.1109/ISMAR.2014.6948510", "title": "A ‘Look Into’ Medical augmented reality", "normalizedTitle": "A ‘Look Into’ Medical augmented reality", "abstract": "The concept of augmented reality (AR) has been introduced to variety of felds in the last decade. Recent development of portable devices such as smart phone and tablet PC provides the community a lot of possible applications in AR systems. Even in the medical feld, various AR systems have recently been proposed: systems for education, pre-planning, and those in the operating room. The aim of this tutorial is to bridge the expertise between the researchers in ISMAR community and medical doctors so that researchers can contribute to the medical domain with their specialty more than one can do right now. This tutorial aims to make a bridge between researchers in augmented reality feld and medical doctors. We target an audience interested in medical augmented reality systems.", "abstracts": [ { "abstractType": "Regular", "content": "The concept of augmented reality (AR) has been introduced to variety of felds in the last decade. Recent development of portable devices such as smart phone and tablet PC provides the community a lot of possible applications in AR systems. Even in the medical feld, various AR systems have recently been proposed: systems for education, pre-planning, and those in the operating room. The aim of this tutorial is to bridge the expertise between the researchers in ISMAR community and medical doctors so that researchers can contribute to the medical domain with their specialty more than one can do right now. 
This tutorial aims to make a bridge between researchers in augmented reality feld and medical doctors. We target an audience interested in medical augmented reality systems.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The concept of augmented reality (AR) has been introduced to variety of felds in the last decade. Recent development of portable devices such as smart phone and tablet PC provides the community a lot of possible applications in AR systems. Even in the medical feld, various AR systems have recently been proposed: systems for education, pre-planning, and those in the operating room. The aim of this tutorial is to bridge the expertise between the researchers in ISMAR community and medical doctors so that researchers can contribute to the medical domain with their specialty more than one can do right now. This tutorial aims to make a bridge between researchers in augmented reality feld and medical doctors. We target an audience interested in medical augmented reality systems.", "fno": "06948510", "keywords": [ "Biomedical Imaging", "Augmented Reality", "Educational Institutions", "Tutorials", "Bridges", "Surgery" ], "authors": [ { "affiliation": "Waseda University, Japan, Pascal Fallavollita, Technische Universität München, Germany", "fullName": "Yuji Oyamada", "givenName": "Yuji", "surname": "Oyamada", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-09-01T00:00:00", "pubType": "proceedings", "pages": "1-1", "year": "2014", "issn": null, "isbn": "978-1-4799-6184-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06948509", "articleId": "12OmNvm6VJ7", "__typename": "AdjacentArticleType" }, "next": { "fno": "06948511", "articleId": "12OmNwseESG", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, 
"recommendedArticles": [ { "id": "proceedings/ismar/2012/4660/0/06402569", "title": "Superman-like X-ray vision: Towards brain-computer interfaces for medical augmented reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2012/06402569/12OmNAoUT3L", "parentPublication": { "id": "proceedings/ismar/2012/4660/0", "title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2015/7660/0/7660a001", "title": "Introducing Augmented Reality to Optical Coherence Tomography in Ophthalmic Microsurgery", "doi": null, "abstractUrl": "/proceedings-article/ismar/2015/7660a001/12OmNvmG7Wp", "parentPublication": { "id": "proceedings/ismar/2015/7660/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciibms/2017/6664/0/08279706", "title": "A medical training system using augmented reality", "doi": null, "abstractUrl": "/proceedings-article/iciibms/2017/08279706/12OmNwErpJY", "parentPublication": { "id": "proceedings/iciibms/2017/6664/0", "title": "2017 International Conference on Intelligent Informatics and Biomedical Sciences (ICIIBMS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2007/1749/0/04538837", "title": "Contextual Anatomic Mimesis Hybrid In-Situ Visualization Method for Improving Multi-Sensory Depth Perception in Medical Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2007/04538837/12OmNx5pj13", "parentPublication": { "id": "proceedings/ismar/2007/1749/0", "title": "2007 6th IEEE and ACM International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2005/2459/0/01544697", "title": "Spatial measurements for 
medical augmented reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2005/01544697/12OmNyFCvV8", "parentPublication": { "id": "proceedings/ismar/2005/2459/0", "title": "Fourth IEEE and ACM International Symposium on Mixed and Augmented Reality (ISMAR'05)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicse/2013/5118/0/5118a100", "title": "A Convenient Method of Video See-Through Augmented Reality Based on Image-Guided Surgery System", "doi": null, "abstractUrl": "/proceedings-article/icicse/2013/5118a100/12OmNzSQdqd", "parentPublication": { "id": "proceedings/icicse/2013/5118/0", "title": "2013 Seventh International Conference on Internet Computing for Engineering and Science (ICICSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699222", "title": "TutAR: Semi-Automatic Generation of Augmented Reality Tutorials for Medical Education", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699222/19F1PQOMxWg", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089476", "title": "Enlightening Patients with Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089476/1jIxaFnm0GQ", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09211732", "title": "Comparison of Augmented Reality Display Techniques to Support Medical Needle Insertion", "doi": null, "abstractUrl": "/journal/tg/2020/12/09211732/1nB9X7YX7eU", "parentPublication": { "id": "trans/tg", 
"title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2021/3225/0/322500a194", "title": "SkillsLab+ - Augmented Reality Enhanced Medical Training", "doi": null, "abstractUrl": "/proceedings-article/aivr/2021/322500a194/1zxLxeJ81yM", "parentPublication": { "id": "proceedings/aivr/2021/3225/0", "title": "2021 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzzxuy8", "title": "2013 International Conference on Cyberworlds (CW)", "acronym": "cw", "groupId": "1000175", "volume": "0", "displayVolume": "0", "year": "2013", "__typename": "ProceedingType" }, "article": { "id": "12OmNzkuKyK", "doi": "10.1109/CW.2013.12", "title": "Seamless Annotation Display for Augmented Reality", "normalizedTitle": "Seamless Annotation Display for Augmented Reality", "abstract": "This paper proposes seamless display connection between two display styles, AR display and table-top display. The former is used in most augmented reality applications, and the latter is used in table-top display. Our framework supports three types of annotations, inner annotation, outer annotation and adaptive annotation. We developed the prototype system that can seamlessly switch the two display styles.", "abstracts": [ { "abstractType": "Regular", "content": "This paper proposes seamless display connection between two display styles, AR display and table-top display. The former is used in most augmented reality applications, and the latter is used in table-top display. Our framework supports three types of annotations, inner annotation, outer annotation and adaptive annotation. We developed the prototype system that can seamlessly switch the two display styles.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper proposes seamless display connection between two display styles, AR display and table-top display. The former is used in most augmented reality applications, and the latter is used in table-top display. Our framework supports three types of annotations, inner annotation, outer annotation and adaptive annotation. 
We developed the prototype system that can seamlessly switch the two display styles.", "fno": "2246a387", "keywords": [ "Monitoring", "Augmented Reality", "Educational Institutions", "Prototypes", "Painting", "Information Science", "Electronic Mail", "Homography Transformation", "Augmented Reality", "Marker Less Tracking" ], "authors": [ { "affiliation": "Grad. Sch. of Inf. Sci., Kyushu Sangyo Univ., Fukuoka, Japan", "fullName": "Satoshi Yonemoto", "givenName": "Satoshi", "surname": "Yonemoto", "__typename": "ArticleAuthorType" } ], "idPrefix": "cw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2013-10-01T00:00:00", "pubType": "proceedings", "pages": "387-387", "year": "2013", "issn": null, "isbn": "978-1-4799-2246-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "2246a386", "articleId": "12OmNzmclIP", "__typename": "AdjacentArticleType" }, "next": { "fno": "2246a388", "articleId": "12OmNAlvI9b", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2014/6184/0/06948459", "title": "[Poster] Utilizing contact-view as an augmented reality authoring method for printed document annotation", "doi": null, "abstractUrl": "/proceedings-article/ismar/2014/06948459/12OmNAlvI6d", "parentPublication": { "id": "proceedings/ismar/2014/6184/0", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icat/2007/3056/0/30560055", "title": "Volumetric Display for Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/icat/2007/30560055/12OmNBCqbJu", "parentPublication": { "id": "proceedings/icat/2007/3056/0", "title": "17th International Conference on Artificial Reality and Telexistence (ICAT 2007)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/1999/0210/0/02100128", "title": "Optical Occlusion and Shadows in a 'See-through' Augmented Reality Display", "doi": null, "abstractUrl": "/proceedings-article/iv/1999/02100128/12OmNBigFtH", "parentPublication": { "id": "proceedings/iv/1999/0210/0", "title": "1999 IEEE International Conference on Information Visualization (Cat. No. PR00210)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2007/1749/0/04538832", "title": "Evaluating Display Types for AR Selection and Annotation", "doi": null, "abstractUrl": "/proceedings-article/ismar/2007/04538832/12OmNrIaef4", "parentPublication": { "id": "proceedings/ismar/2007/1749/0", "title": "2007 6th IEEE and ACM International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892383", "title": "Gesture-based augmented reality annotation", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892383/12OmNwJPMYX", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sitis/2013/3211/0/3211a385", "title": "Making a Hands-On Display with Augmented Reality Work at a Science Museum", "doi": null, "abstractUrl": "/proceedings-article/sitis/2013/3211a385/12OmNwpXRVO", "parentPublication": { "id": "proceedings/sitis/2013/3211/0", "title": "2013 International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446457", "title": "Memory Task Performance Across Augmented and Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446457/13bd1fph1yg", "parentPublication": { 
"id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699300", "title": "An Explanatory Windshield Display Interface with Augmented Reality Elements for Urban Autonomous Driving", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699300/19F1SHbDTJC", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798128", "title": "Supporting Visual Annotation Cues in a Live 360 Panorama-based Mixed Reality Remote Collaboration", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798128/1cJ1aXJnUyI", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090648", "title": "Combining Wristband Display and Wearable Haptics for Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090648/1jIxzbR8eVa", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKirl", "title": "2018 17th International Symposium on Distributed Computing and Applications for Business Engineering and Science (DCABES)", "acronym": "dcabes", "groupId": "1800130", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "17D45XwUAGV", "doi": "10.1109/DCABES.2018.00029", "title": "Design of Spot Introduction and User Interaction System Based on AR Augmented Reality Technology", "normalizedTitle": "Design of Spot Introduction and User Interaction System Based on AR Augmented Reality Technology", "abstract": "In order to construct a convenient, interesting attraction introduction and user interaction system, based on AR enhanced display technology, a life application system is development. The system integrates AR interactive service module, mobile user terminal module, Web communication service module, and information recording module. The user turns on the mobile user terminal and obtains functional services such as AR interactive mode and navigation area introduction. The service information is enter into the information-recording module, and the user selects and records the information via the Web to exchange and share. Experiments display that the application system is convenient and interesting. The results display that the application of the proposed system can satisfy the daily needs of users.", "abstracts": [ { "abstractType": "Regular", "content": "In order to construct a convenient, interesting attraction introduction and user interaction system, based on AR enhanced display technology, a life application system is development. The system integrates AR interactive service module, mobile user terminal module, Web communication service module, and information recording module. The user turns on the mobile user terminal and obtains functional services such as AR interactive mode and navigation area introduction. 
The service information is enter into the information-recording module, and the user selects and records the information via the Web to exchange and share. Experiments display that the application system is convenient and interesting. The results display that the application of the proposed system can satisfy the daily needs of users.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In order to construct a convenient, interesting attraction introduction and user interaction system, based on AR enhanced display technology, a life application system is development. The system integrates AR interactive service module, mobile user terminal module, Web communication service module, and information recording module. The user turns on the mobile user terminal and obtains functional services such as AR interactive mode and navigation area introduction. The service information is enter into the information-recording module, and the user selects and records the information via the Web to exchange and share. Experiments display that the application system is convenient and interesting. 
The results display that the application of the proposed system can satisfy the daily needs of users.", "fno": "744500a076", "keywords": [ "Augmented Reality", "Mobile Communication", "Mobile Handsets", "Navigation", "Web Services", "User Interaction System", "AR Enhanced Display Technology", "Life Application System", "AR Interactive Service Module", "Mobile User Terminal Module", "Web Communication Service Module", "Information Recording Module", "Navigation Area Introduction", "Service Information", "Information Recording Module", "Spot Introduction", "Reality Technology", "Augmented Reality", "Web Servers", "Navigation", "Computational Modeling", "Visualization", "Solid Modeling", "Mobile Handsets", "AR Augmented Reality Technology Attractions Introduction User Interaction System AR Interaction Service Module" ], "authors": [ { "affiliation": null, "fullName": "Wenjun Tang", "givenName": "Wenjun", "surname": "Tang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Qihang Ge", "givenName": "Qihang", "surname": "Ge", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Lei Zhou", "givenName": "Lei", "surname": "Zhou", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Quanyin Zhu", "givenName": "Quanyin", "surname": "Zhu", "__typename": "ArticleAuthorType" } ], "idPrefix": "dcabes", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-10-01T00:00:00", "pubType": "proceedings", "pages": "76-79", "year": "2018", "issn": null, "isbn": "978-1-5386-7445-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "744500a072", "articleId": "17D45WXIkIb", "__typename": "AdjacentArticleType" }, "next": { "fno": "744500a080", "articleId": "17D45WgziNK", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": 
"proceedings/ismar/2015/7660/0/7660a049", "title": "The Ventriloquist Effect in Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2015/7660a049/12OmNvAiSE1", "parentPublication": { "id": "proceedings/ismar/2015/7660/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icppw/2011/4511/0/4511a063", "title": "AR-Based Positioning for Mobile Devices", "doi": null, "abstractUrl": "/proceedings-article/icppw/2011/4511a063/12OmNwwuE0H", "parentPublication": { "id": "proceedings/icppw/2011/4511/0", "title": "2011 40th International Conference on Parallel Processing Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2013/2869/0/06671786", "title": "Interaction techniques for HMD-HHD hybrid AR systems", "doi": null, "abstractUrl": "/proceedings-article/ismar/2013/06671786/12OmNyxFKaD", "parentPublication": { "id": "proceedings/ismar/2013/2869/0", "title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rtcsa/2018/7759/0/775900a209", "title": "Exploring Augmented Reality Interaction for Everyday Multipurpose Wearable Robots", "doi": null, "abstractUrl": "/proceedings-article/rtcsa/2018/775900a209/17D45WaTkd9", "parentPublication": { "id": "proceedings/rtcsa/2018/7759/0", "title": "2018 IEEE 24th International Conference on Embedded and Real-Time Computing Systems and Applications (RTCSA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a128", "title": "Multi-vehicle Cooperative Military Training Simulation System Based on Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a128/1gysonjGAqA", 
"parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089433", "title": "Glanceable AR: Evaluating Information Access Methods for Head-Worn Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089433/1jIxf3ZEs0w", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090479", "title": "Impact of AR Display Context Switching and Focal Distance Switching on Human Performance: Replication on an AR Haploscope", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090479/1jIxlrWEUmc", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2020/03/09076055", "title": "Pain Marker Evaluation Application in Augmented Reality and Mobile Platforms", "doi": null, "abstractUrl": "/magazine/cs/2020/03/09076055/1jeD89s9Z96", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09211732", "title": "Comparison of Augmented Reality Display Techniques to Support Medical Needle Insertion", "doi": null, "abstractUrl": "/journal/tg/2020/12/09211732/1nB9X7YX7eU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/iscc/2021/2744/0/09631438", "title": "Mobile Augmented Reality for Craniotomy Planning", "doi": null, "abstractUrl": "/proceedings-article/iscc/2021/09631438/1zmvEvuTSCI", "parentPublication": { "id": "proceedings/iscc/2021/2744/0", "title": "2021 IEEE Symposium on Computers and Communications (ISCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1J7W6LmbCw0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "9973799", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1J7W8cdLJeg", "doi": "10.1109/ISMAR-Adjunct57072.2022.00132", "title": "An Evaluation of Caret Navigation Methods for Text Editing in Augmented Reality", "normalizedTitle": "An Evaluation of Caret Navigation Methods for Text Editing in Augmented Reality", "abstract": "A common task in text editing is navigating the text caret as part of the text editing process. We explore three key dimensions in the design space of caret navigation techniques: cursor placement method, visual expansion in the form of a magnifying lens, and display distance. In addition to two commonly used cursor placement methods, direct touch and raycast, we also present a novel multimodal text cursor placement method combining eye gaze and touch gestures for optical see-through augmented reality (AR). This method allows the user to refine the caret position with an indirect mid-air virtual touch block after the caret has snapped to a location provided by an initial eye fixation. We derive eight combinations from three design dimensions and study their performance in a user study with 24 participants. Our results reveal that: 1) raycast delivered the fastest completion times among all combinations evaluated; 2) in near-field conditions the multimodal method can achieve similar performance as direct touch input with less physical effort; and 3) magnifying lenses offer no significant performance advantages for caret navigation in AR.", "abstracts": [ { "abstractType": "Regular", "content": "A common task in text editing is navigating the text caret as part of the text editing process. 
We explore three key dimensions in the design space of caret navigation techniques: cursor placement method, visual expansion in the form of a magnifying lens, and display distance. In addition to two commonly used cursor placement methods, direct touch and raycast, we also present a novel multimodal text cursor placement method combining eye gaze and touch gestures for optical see-through augmented reality (AR). This method allows the user to refine the caret position with an indirect mid-air virtual touch block after the caret has snapped to a location provided by an initial eye fixation. We derive eight combinations from three design dimensions and study their performance in a user study with 24 participants. Our results reveal that: 1) raycast delivered the fastest completion times among all combinations evaluated; 2) in near-field conditions the multimodal method can achieve similar performance as direct touch input with less physical effort; and 3) magnifying lenses offer no significant performance advantages for caret navigation in AR.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A common task in text editing is navigating the text caret as part of the text editing process. We explore three key dimensions in the design space of caret navigation techniques: cursor placement method, visual expansion in the form of a magnifying lens, and display distance. In addition to two commonly used cursor placement methods, direct touch and raycast, we also present a novel multimodal text cursor placement method combining eye gaze and touch gestures for optical see-through augmented reality (AR). This method allows the user to refine the caret position with an indirect mid-air virtual touch block after the caret has snapped to a location provided by an initial eye fixation. We derive eight combinations from three design dimensions and study their performance in a user study with 24 participants. 
Our results reveal that: 1) raycast delivered the fastest completion times among all combinations evaluated; 2) in near-field conditions the multimodal method can achieve similar performance as direct touch input with less physical effort; and 3) magnifying lenses offer no significant performance advantages for caret navigation in AR.", "fno": "536500a640", "keywords": [ "Augmented Reality", "Data Visualisation", "Eye", "Gesture Recognition", "Human Computer Interaction", "Lenses", "Mouse Controllers Computers", "Touch Sensitive Screens", "User Interfaces", "Word Processing", "Augmented Reality", "Caret Navigation Methods", "Caret Navigation Techniques", "Caret Position", "Design Dimensions", "Design Space", "Direct Touch Input", "Display Distance", "Eye Gaze", "Initial Eye Fixation", "Key Dimensions", "Magnifying Lens", "Mid Air Virtual Touch Block", "Multimodal Method", "Multimodal Text Cursor Placement Method", "Placement Methods", "Raycast", "Text Caret", "Text Editing Process", "Visual Expansion", "Visualization", "Navigation", "Task Analysis", "Augmented Reality", "Lenses", "Human Centered Computing", "Human Computer Interaction HCI", "Interaction Techniques", "Text Input", "Computing Methodologies", "Computer Graphics", "Graphics Systems And Interfaces", "Mixed Augmented Reality" ], "authors": [ { "affiliation": "University of Cambridge", "fullName": "Jinghui Hu", "givenName": "Jinghui", "surname": "Hu", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Cambridge", "fullName": "John J. 
Dudley", "givenName": "John J.", "surname": "Dudley", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Cambridge", "fullName": "Per Ola Kristensson", "givenName": "Per Ola", "surname": "Kristensson", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-10-01T00:00:00", "pubType": "proceedings", "pages": "640-645", "year": "2022", "issn": null, "isbn": "978-1-6654-5365-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "536500a634", "articleId": "1J7Woy7eKTS", "__typename": "AdjacentArticleType" }, "next": { "fno": "536500a646", "articleId": "1J7WebwvRgA", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2010/9343/0/05643602", "title": "Camera pose navigation using Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2010/05643602/12OmNvA1hoG", "parentPublication": { "id": "proceedings/ismar/2010/9343/0", "title": "2010 IEEE International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2015/6886/0/07131755", "title": "Mapping 2D input to 3D immersive spatial augmented reality", "doi": null, "abstractUrl": "/proceedings-article/3dui/2015/07131755/12OmNwAKCNT", "parentPublication": { "id": "proceedings/3dui/2015/6886/0", "title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2013/4795/0/06549411", "title": "Early steps towards understanding text legibility in handheld augmented reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2013/06549411/12OmNy6HQV1", "parentPublication": { "id": "proceedings/vr/2013/4795/0", "title": "2013 
IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icalt/2017/3870/0/3870a466", "title": "ARCoins. An Augmented Reality App for Learning about Numismatics", "doi": null, "abstractUrl": "/proceedings-article/icalt/2017/3870a466/12OmNyqRn48", "parentPublication": { "id": "proceedings/icalt/2017/3870/0", "title": "2017 IEEE 17th International Conference on Advanced Learning Technologies (ICALT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08448289", "title": "Performance Envelopes of in-Air Direct and Smartwatch Indirect Control for Head-Mounted Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08448289/13bd1fZBGcE", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vl-hcc/2022/4214/0/09833120", "title": "A technique to improve text editing on smartphones", "doi": null, "abstractUrl": "/proceedings-article/vl-hcc/2022/09833120/1FUSHflcy2I", "parentPublication": { "id": "proceedings/vl-hcc/2022/4214/0", "title": "2022 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/percom/2019/9148/0/08767420", "title": "HIBEY: Hide the Keyboard in Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/percom/2019/08767420/1bQzm74HXBm", "parentPublication": { "id": "proceedings/percom/2019/9148/0", "title": "2019 IEEE International Conference on Pervasive Computing and Communications (PerCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797992", "title": "Text Presentation for Augmented Reality Applications in 
Dual-Task Situations", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797992/1cJ0SIvkZnG", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798340", "title": "Augmented Reality Map Navigation with Freehand Gestures", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798340/1cJ1fg0gjAY", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a332", "title": "ARPads: Mid-air Indirect Input for Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a332/1pysxWDVgS4", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1zmvyw5CYAU", "title": "2021 IEEE Symposium on Computers and Communications (ISCC)", "acronym": "iscc", "groupId": "1000156", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1zmvEvuTSCI", "doi": "10.1109/ISCC53001.2021.9631438", "title": "Mobile Augmented Reality for Craniotomy Planning", "normalizedTitle": "Mobile Augmented Reality for Craniotomy Planning", "abstract": "Augmented reality (AR) neuronavigation has been proposed to address the shortcomings of conventional neuron-avigators. Researchers have presented low-cost AR methods for craniotomy planning, but they lack navigation capabilities. Other studies introduce AR neuronavigation systems that are a step further in usability than traditional neuronavigators, but they may be hard to obtain or reproduce. AR neuronavigation was also implemented on mobile devices, but most systems have an undesired lag during the navigation. This work investigates the feasibility of creating an accurate and low-cost standalone mobile AR neuronavigator. Unlike other mobile approaches, this solution has no perceptible lag as the processing is efficiently performed on the device instead of an external computer. Results show that a neuronavigation system can be deployed on a mobile device, running smoothly at 60 frames per second, and achieving a smaller than 5 mm target registration error.", "abstracts": [ { "abstractType": "Regular", "content": "Augmented reality (AR) neuronavigation has been proposed to address the shortcomings of conventional neuron-avigators. Researchers have presented low-cost AR methods for craniotomy planning, but they lack navigation capabilities. Other studies introduce AR neuronavigation systems that are a step further in usability than traditional neuronavigators, but they may be hard to obtain or reproduce. 
AR neuronavigation was also implemented on mobile devices, but most systems have an undesired lag during the navigation. This work investigates the feasibility of creating an accurate and low-cost standalone mobile AR neuronavigator. Unlike other mobile approaches, this solution has no perceptible lag as the processing is efficiently performed on the device instead of an external computer. Results show that a neuronavigation system can be deployed on a mobile device, running smoothly at 60 frames per second, and achieving a smaller than 5 mm target registration error.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Augmented reality (AR) neuronavigation has been proposed to address the shortcomings of conventional neuron-avigators. Researchers have presented low-cost AR methods for craniotomy planning, but they lack navigation capabilities. Other studies introduce AR neuronavigation systems that are a step further in usability than traditional neuronavigators, but they may be hard to obtain or reproduce. AR neuronavigation was also implemented on mobile devices, but most systems have an undesired lag during the navigation. This work investigates the feasibility of creating an accurate and low-cost standalone mobile AR neuronavigator. Unlike other mobile approaches, this solution has no perceptible lag as the processing is efficiently performed on the device instead of an external computer. 
Results show that a neuronavigation system can be deployed on a mobile device, running smoothly at 60 frames per second, and achieving a smaller than 5 mm target registration error.", "fno": "09631438", "keywords": [ "Augmented Reality", "Data Visualisation", "Image Registration", "Mobile Computing", "Neurophysiology", "Surgery", "Mobile Augmented Reality", "Craniotomy Planning", "Augmented Reality Neuronavigation", "Conventional Neuron Avigators", "Low Cost AR Methods", "Navigation Capabilities", "AR Neuronavigation Systems", "Traditional Neuronavigators", "Mobile Device", "Undesired Lag", "Accurate Cost Standalone Mobile AR Neuronavigator", "Low Cost Standalone Mobile AR Neuronavigator", "Mobile Approaches", "Perceptible Lag", "Neuronavigation System", "Performance Evaluation", "Navigation", "Mobile Handsets", "Planning", "Usability", "Augmented Reality", "Neuronavigation", "Neurosurgery" ], "authors": [ { "affiliation": "Universidade Federal de Sergipe,Departamento de Computação,São Cristóvão,SE,Brazil", "fullName": "Marcel Oliveira Alves", "givenName": "Marcel Oliveira", "surname": "Alves", "__typename": "ArticleAuthorType" }, { "affiliation": "Universidade Federal de Sergipe,Departamento de Computação,São Cristóvão,SE,Brazil", "fullName": "Daniel Oliveira Dantas", "givenName": "Daniel Oliveira", "surname": "Dantas", "__typename": "ArticleAuthorType" } ], "idPrefix": "iscc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-09-01T00:00:00", "pubType": "proceedings", "pages": "1-6", "year": "2021", "issn": null, "isbn": "978-1-6654-2744-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09631401", "articleId": "1zmvDt17mG4", "__typename": "AdjacentArticleType" }, "next": { "fno": "09631391", "articleId": "1zmvLO4jAAg", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { 
"id": "proceedings/icsc/2016/0662/0/0662a358", "title": "Mobile Augmented Reality Authoring Tool", "doi": null, "abstractUrl": "/proceedings-article/icsc/2016/0662a358/12OmNAXglVC", "parentPublication": { "id": "proceedings/icsc/2016/0662/0", "title": "2016 IEEE Tenth International Conference on Semantic Computing (ICSC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icedeg/2015/8910/0/07114484", "title": "Augmented reality applied in tourism mobile applications", "doi": null, "abstractUrl": "/proceedings-article/icedeg/2015/07114484/12OmNCfAPK9", "parentPublication": { "id": "proceedings/icedeg/2015/8910/0", "title": "2015 Second International Conference on eDemocracy & eGovernment (ICEDEG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2015/7673/0/7673a308", "title": "Research and Application of Indoor Guide Based on Mobile Augmented Reality System", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2015/7673a308/12OmNxwENA9", "parentPublication": { "id": "proceedings/icvrv/2015/7673/0", "title": "2015 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icinis/2013/2809/0/2809a139", "title": "Campus Navigation System Based on Mobile Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/icinis/2013/2809a139/12OmNxzMnUI", "parentPublication": { "id": "proceedings/icinis/2013/2809/0", "title": "2013 6th International Conference on Intelligent Networks and Intelligent Systems (ICINIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dcabes/2018/7445/0/744500a076", "title": "Design of Spot Introduction and User Interaction System Based on AR Augmented Reality Technology", "doi": null, "abstractUrl": "/proceedings-article/dcabes/2018/744500a076/17D45XwUAGV", 
"parentPublication": { "id": "proceedings/dcabes/2018/7445/0", "title": "2018 17th International Symposium on Distributed Computing and Applications for Business Engineering and Science (DCABES)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/percom/2019/9148/0/08767388", "title": "M2A: A Framework for Visualizing Information from Mobile Web to Mobile Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/percom/2019/08767388/1bQzkg8Ah7G", "parentPublication": { "id": "proceedings/percom/2019/9148/0", "title": "2019 IEEE International Conference on Pervasive Computing and Communications (PerCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icalt/2019/3485/0/348500a337", "title": "An Augmented Reality App for Therapeutic Education and Suitable for Mobile Devices with Different Features", "doi": null, "abstractUrl": "/proceedings-article/icalt/2019/348500a337/1cYi0BDk7ba", "parentPublication": { "id": "proceedings/icalt/2019/3485/2161-377X", "title": "2019 IEEE 19th International Conference on Advanced Learning Technologies (ICALT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090527", "title": "Distance Perception in Modern Mobile Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090527/1jIxsZjczAc", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2019/4752/0/09212860", "title": "User Engagement for Collaborative Learning on a Mobile and Desktop Augmented Reality Application", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2019/09212860/1nHRTRhZdRK", "parentPublication": { "id": "proceedings/icvrv/2019/4752/0", 
"title": "2019 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2020/9134/0/913400a454", "title": "Augmented Reality with Maps for Off-Screen POI Awareness", "doi": null, "abstractUrl": "/proceedings-article/iv/2020/913400a454/1rSR7Fgh4qc", "parentPublication": { "id": "proceedings/iv/2020/9134/0", "title": "2020 24th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzQhP7Z", "title": "2014 International Symposium on Optomechatronic Technologies (ISOT 2014)", "acronym": "isot", "groupId": "1002942", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNBkxstG", "doi": "10.1109/ISOT.2014.78", "title": "Holographic Femtosecond Laser Processing with Full Control of Phase Distributions and Polarization States of Light", "normalizedTitle": "Holographic Femtosecond Laser Processing with Full Control of Phase Distributions and Polarization States of Light", "abstract": "A control of degrees of freedom in the manipulation of light, including amplitude, phase and polarization is important for femto second laser processing with effective light-matter interactions. In this study, we demonstrate femto second laser processing with full control of phase distributions and polarization states of light by computer-generated holograms displayed on the spatial light modulators.", "abstracts": [ { "abstractType": "Regular", "content": "A control of degrees of freedom in the manipulation of light, including amplitude, phase and polarization is important for femto second laser processing with effective light-matter interactions. In this study, we demonstrate femto second laser processing with full control of phase distributions and polarization states of light by computer-generated holograms displayed on the spatial light modulators.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A control of degrees of freedom in the manipulation of light, including amplitude, phase and polarization is important for femto second laser processing with effective light-matter interactions. 
In this study, we demonstrate femto second laser processing with full control of phase distributions and polarization states of light by computer-generated holograms displayed on the spatial light modulators.", "fno": "07119442", "keywords": [ "Ultrafast Optics", "Image Reconstruction", "Lasers", "Optical Device Fabrication", "Laser Beams", "Optical Polarization", "Holography", "Femtosecond Laser Microstructure Fabrication", "Computer Generated Hologram", "Spatial Light Modulator" ], "authors": [ { "affiliation": null, "fullName": "Satoshi Hasegawa", "givenName": "Satoshi", "surname": "Hasegawa", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yoshio Hayasaki", "givenName": "Yoshio", "surname": "Hayasaki", "__typename": "ArticleAuthorType" } ], "idPrefix": "isot", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-11-01T00:00:00", "pubType": "proceedings", "pages": "291-294", "year": "2014", "issn": null, "isbn": "978-1-4673-6752-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07119441", "articleId": "12OmNCctfcg", "__typename": "AdjacentArticleType" }, "next": { "fno": "07119443", "articleId": "12OmNwF0BX4", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/1995/7042/0/70420981", "title": "Improving laser triangulation sensors using polarization", "doi": null, "abstractUrl": "/proceedings-article/iccv/1995/70420981/12OmNAtst75", "parentPublication": { "id": "proceedings/iccv/1995/7042/0", "title": "Computer Vision, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aiccsa/2016/4320/0/07945751", "title": "Towards the growth of optical security systems for image encryption by polarized light", "doi": null, "abstractUrl": 
"/proceedings-article/aiccsa/2016/07945751/12OmNqGiu9S", "parentPublication": { "id": "proceedings/aiccsa/2016/4320/0", "title": "2016 IEEE/ACS 13th International Conference of Computer Systems and Applications (AICCSA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icece/2010/4031/0/4031e552", "title": "Research on Light Polarization FSO-OFDM System", "doi": null, "abstractUrl": "/proceedings-article/icece/2010/4031e552/12OmNvDqsLP", "parentPublication": { "id": "proceedings/icece/2010/4031/0", "title": "Electrical and Control Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/eqec/2005/8973/0/01567382", "title": "Observation of Mie resonances for a single microsphere using force spectroscopy and two photon excited luminescence in an optical tweezers system", "doi": null, "abstractUrl": "/proceedings-article/eqec/2005/01567382/12OmNxAlA0b", "parentPublication": { "id": "proceedings/eqec/2005/8973/0", "title": "2005 European Quantum Electronics Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/eqec/2005/8973/0/01567440", "title": "Efficient polarization squeezing in optical fibers", "doi": null, "abstractUrl": "/proceedings-article/eqec/2005/01567440/12OmNxVlTJn", "parentPublication": { "id": "proceedings/eqec/2005/8973/0", "title": "2005 European Quantum Electronics Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2013/3022/0/3022a852", "title": "Polarization-Based Dehazing Using Two Reference Objects", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2013/3022a852/12OmNxvO044", "parentPublication": { "id": "proceedings/iccvw/2013/3022/0", "title": "2013 IEEE International Conference on Computer Vision Workshops (ICCVW)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/paccs/2009/3614/0/3614a461", "title": "Synthesis of a New Photochromic Diarylethene and its Application in Holographic Optical Storage", "doi": null, "abstractUrl": "/proceedings-article/paccs/2009/3614a461/12OmNzZmZnP", "parentPublication": { "id": "proceedings/paccs/2009/3614/0", "title": "2009 Pacific-Asia Conference on Circuits, Communications and Systems (PACCS 2009)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2014/5188/0/06831819", "title": "Digital refocusing with incoherent holography", "doi": null, "abstractUrl": "/proceedings-article/iccp/2014/06831819/12OmNzgeLBy", "parentPublication": { "id": "proceedings/iccp/2014/5188/0", "title": "2014 IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acv/1992/2840/0/00240320", "title": "Liquid crystal polarization camera", "doi": null, "abstractUrl": "/proceedings-article/acv/1992/00240320/12OmNzsrwbG", "parentPublication": { "id": "proceedings/acv/1992/2840/0", "title": "Proceedings IEEE Workshop on Applications of Computer Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2022/9978/0/997800a006", "title": "A Transmission Grating-based Polarization Demodulated Grating Interferometric Sensor", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2022/997800a006/1ByeBzTTXmE", "parentPublication": { "id": "proceedings/icmtma/2022/9978/0", "title": "2022 14th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNrAMEOk", "title": "Proceedings of 1st International Conference on Image Processing", "acronym": "icip", "groupId": "1000349", "volume": "2", "displayVolume": "2", "year": "1994", "__typename": "ProceedingType" }, "article": { "id": "12OmNvsDHJ5", "doi": "10.1109/ICIP.1994.413504", "title": "Calibration for peripheral attenuation in intensity images", "normalizedTitle": "Calibration for peripheral attenuation in intensity images", "abstract": "An image taken from a typical camera loses its intensity and contrast around the periphery due to optical attenuation. A model is derived to characterize this effect quantitatively. This model is derived for a commonly used thick lens using the sine condition, and thus, is more general than those from the Gauss geometrical optics. Based on this model, the authors developed an algorithm to calibrate the periphery attenuation. Some experimental results are presented.<>", "abstracts": [ { "abstractType": "Regular", "content": "An image taken from a typical camera loses its intensity and contrast around the periphery due to optical attenuation. A model is derived to characterize this effect quantitatively. This model is derived for a commonly used thick lens using the sine condition, and thus, is more general than those from the Gauss geometrical optics. Based on this model, the authors developed an algorithm to calibrate the periphery attenuation. Some experimental results are presented.<>", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "An image taken from a typical camera loses its intensity and contrast around the periphery due to optical attenuation. A model is derived to characterize this effect quantitatively. This model is derived for a commonly used thick lens using the sine condition, and thus, is more general than those from the Gauss geometrical optics. Based on this model, the authors developed an algorithm to calibrate the periphery attenuation. 
Some experimental results are presented.", "fno": "00413504", "keywords": [ "Calibration", "Light Scattering", "Light Absorption", "Image Processing", "Video Cameras", "Light Transmission", "Lenses", "Optical Images", "Photographic Lenses", "Geometrical Optics", "Peripheral Attenuation", "Intensity Images", "Contrast", "Optical Attenuation", "Sine Condition", "Thick Lens", "Algorithm", "Calibration", "Attenuation", "Lenses", "Optical Attenuators", "Optical Variables Control", "Geometrical Optics", "Optical Distortion", "Photometry", "Optical Refraction", "Brightness" ], "authors": [ { "affiliation": "Dept. of Comput. Sci., Michigan State Univ., East Lansing, MI, USA", "fullName": "Shaoyun Chen", "givenName": null, "surname": "Shaoyun Chen", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Comput. Sci., Michigan State Univ., East Lansing, MI, USA", "fullName": "Juyang Weng", "givenName": null, "surname": "Juyang Weng", "__typename": "ArticleAuthorType" } ], "idPrefix": "icip", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "1994-01-01T00:00:00", "pubType": "proceedings", "pages": "992,993,994,995,996", "year": "1994", "issn": null, "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "00413503", "articleId": "12OmNvDqsKX", "__typename": "AdjacentArticleType" }, "next": { "fno": "00413505", "articleId": "12OmNvRU0jW", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iqec/2005/9240/0/01561145", "title": "High sensitive detection of optical constants with phase shift in Terahertz time-domain reflection spectroscopy", "doi": null, "abstractUrl": "/proceedings-article/iqec/2005/01561145/12OmNBCqbCZ", "parentPublication": { "id": "proceedings/iqec/2005/9240/0", "title": "International Quantum Electronics Conference, 2005.", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457a568", "title": "What is the Space of Attenuation Coefficients in Underwater Computer Vision?", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457a568/12OmNBeRtNb", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aqtr/2010/6724/2/05520711", "title": "Non-invasive steatosis assessment in NASH through the computerized processing of ultrasound images: Attenuation versus textural parameters", "doi": null, "abstractUrl": "/proceedings-article/aqtr/2010/05520711/12OmNBtl1u5", "parentPublication": { "id": "proceedings/aqtr/2010/6724/2", "title": "International Conference on Automation, Quality and Testing, Robotics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acssc/1997/8316/1/00680189", "title": "Estimation of wind velocity and backscatter signal intensity from Doppler lidar returns", "doi": null, "abstractUrl": "/proceedings-article/acssc/1997/00680189/12OmNCf1Drq", "parentPublication": { "id": "proceedings/acssc/1997/8316/1", "title": "Conference Record of the Thirty-First Asilomar Conference on Signals, Systems and Computers (Cat. 
No.97CB36163)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/1992/2855/0/00223148", "title": "Diffuse reflection (intensity reflectance model)", "doi": null, "abstractUrl": "/proceedings-article/cvpr/1992/00223148/12OmNwlZu1a", "parentPublication": { "id": "proceedings/cvpr/1992/2855/0", "title": "Proceedings 1992 IEEE Computer Society Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/memsys/1997/3744/0/00581761", "title": "Near field optics for nanometric sensing and control", "doi": null, "abstractUrl": "/proceedings-article/memsys/1997/00581761/12OmNxw5B9m", "parentPublication": { "id": "proceedings/memsys/1997/3744/0", "title": "Proceedings IEEE The Tenth Annual International Workshop on Micro Electro Mechanical Systems. An Investigation of Micro Structures, Sensors, Actuators, Machines and Robots", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08676153", "title": "Light Attenuation Display: Subtractive See-Through Near-Eye Display via Spatial Color Filtering", "doi": null, "abstractUrl": "/journal/tg/2019/05/08676153/18LFbQfp6x2", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a409", "title": "Adapting Michelson Contrast for use with Optical See-Through Displays", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a409/1J7WpecpAwU", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/isspit/2019/5341/0/09001805", "title": "Modulated Retro-Reflector Transdermal Optical Wireless Communication Systems with Wavelength Diversity over Skin-Induced Attenuation and Pointing Errors", "doi": null, "abstractUrl": "/proceedings-article/isspit/2019/09001805/1hHMmjovKM0", "parentPublication": { "id": "proceedings/isspit/2019/5341/0", "title": "2019 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09199567", "title": "StainedView: Variable-Intensity Light-Attenuation Display with Cascaded Spatial Color Filtering for Improved Color Fidelity", "doi": null, "abstractUrl": "/journal/tg/2020/12/09199567/1ncgpOWQBig", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1BmEezmpGrm", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "acronym": "iccv", "groupId": "1000149", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1BmGLPmnLOw", "doi": "10.1109/ICCV48922.2021.00825", "title": "Unsupervised Dense Deformation Embedding Network for Template-Free Shape Correspondence", "normalizedTitle": "Unsupervised Dense Deformation Embedding Network for Template-Free Shape Correspondence", "abstract": "Shape correspondence from 3D deformation learning has attracted appealing academy interests recently. Nevertheless, current deep learning based methods require the supervision of dense annotations to learn per-point translations, which severely over-parameterize the deformation process. Moreover, they fail to capture local geometric details of original shape via global feature embedding. To address these challenges, we develop a new Unsupervised Dense Deformation Embedding Network (i.e., UD<sup>2</sup>E-Net), which learns to predict deformations between non-rigid shapes from dense local features. Since it is non-trivial to match deformation-variant local features for deformation prediction, we develop an Extrinsic-Intrinsic Autoencoder to first encode extrinsic geometric features from source into intrinsic coordinates in a shared canonical shape, with which the decoder then synthesizes corresponding target features. Moreover, a bounded maximum mean discrepancy loss is developed to mitigate the distribution divergence between the synthesized and original features. To learn natural deformation without dense supervision, we introduce a coarse parameterized deformation graph, for which a novel trace and propagation algorithm is proposed to improve both the quality and efficiency of the deformation. 
Our UD<sup>2</sup>E-Net outperforms state-of-the-art unsupervised methods by 24% on Faust Inter challenge and even supervised methods by 13% on Faust Intra challenge.", "abstracts": [ { "abstractType": "Regular", "content": "Shape correspondence from 3D deformation learning has attracted appealing academy interests recently. Nevertheless, current deep learning based methods require the supervision of dense annotations to learn per-point translations, which severely over-parameterize the deformation process. Moreover, they fail to capture local geometric details of original shape via global feature embedding. To address these challenges, we develop a new Unsupervised Dense Deformation Embedding Network (i.e., UD<sup>2</sup>E-Net), which learns to predict deformations between non-rigid shapes from dense local features. Since it is non-trivial to match deformation-variant local features for deformation prediction, we develop an Extrinsic-Intrinsic Autoencoder to first encode extrinsic geometric features from source into intrinsic coordinates in a shared canonical shape, with which the decoder then synthesizes corresponding target features. Moreover, a bounded maximum mean discrepancy loss is developed to mitigate the distribution divergence between the synthesized and original features. To learn natural deformation without dense supervision, we introduce a coarse parameterized deformation graph, for which a novel trace and propagation algorithm is proposed to improve both the quality and efficiency of the deformation. Our UD<sup>2</sup>E-Net outperforms state-of-the-art unsupervised methods by 24% on Faust Inter challenge and even supervised methods by 13% on Faust Intra challenge.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Shape correspondence from 3D deformation learning has attracted appealing academy interests recently. 
Nevertheless, current deep learning based methods require the supervision of dense annotations to learn per-point translations, which severely over-parameterize the deformation process. Moreover, they fail to capture local geometric details of original shape via global feature embedding. To address these challenges, we develop a new Unsupervised Dense Deformation Embedding Network (i.e., UD2E-Net), which learns to predict deformations between non-rigid shapes from dense local features. Since it is non-trivial to match deformation-variant local features for deformation prediction, we develop an Extrinsic-Intrinsic Autoencoder to first encode extrinsic geometric features from source into intrinsic coordinates in a shared canonical shape, with which the decoder then synthesizes corresponding target features. Moreover, a bounded maximum mean discrepancy loss is developed to mitigate the distribution divergence between the synthesized and original features. To learn natural deformation without dense supervision, we introduce a coarse parameterized deformation graph, for which a novel trace and propagation algorithm is proposed to improve both the quality and efficiency of the deformation. 
Our UD2E-Net outperforms state-of-the-art unsupervised methods by 24% on Faust Inter challenge and even supervised methods by 13% on Faust Intra challenge.", "fno": "281200i341", "keywords": [ "Deep Learning", "Computer Vision", "Three Dimensional Displays", "Shape", "Annotations", "Cognition", "Decoding", "Transfer Low Shot Semi Unsupervised Learning", "3 D From A Single Image And Shape From X", "Gestures And Body Pose", "Vision Applications And Systems" ], "authors": [ { "affiliation": "Shenyang Institute of Automation, Chinese Academy of Sciences,State Key Laboratory of Robotics", "fullName": "Ronghan Chen", "givenName": "Ronghan", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": "Shenyang Institute of Automation, Chinese Academy of Sciences,State Key Laboratory of Robotics", "fullName": "Yang Cong", "givenName": "Yang", "surname": "Cong", "__typename": "ArticleAuthorType" }, { "affiliation": "Shenyang Institute of Automation, Chinese Academy of Sciences,State Key Laboratory of Robotics", "fullName": "Jiahua Dong", "givenName": "Jiahua", "surname": "Dong", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-10-01T00:00:00", "pubType": "proceedings", "pages": "8341-8350", "year": "2021", "issn": null, "isbn": "978-1-6654-2812-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "281200i330", "articleId": "1BmJ6aPyFqM", "__typename": "AdjacentArticleType" }, "next": { "fno": "281200i351", "articleId": "1BmLig9g8jS", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icme/2014/4761/0/06890288", "title": "A new sparse feature-based patch for dense correspondence", "doi": null, "abstractUrl": "/proceedings-article/icme/2014/06890288/12OmNyr8YpE", "parentPublication": { "id": 
"proceedings/icme/2014/4761/0", "title": "2014 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2018/07/07973095", "title": "Dense 3D Face Correspondence", "doi": null, "abstractUrl": "/journal/tp/2018/07/07973095/13rRUwInv5K", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2019/1975/0/197500b034", "title": "DGC-Net: Dense Geometric Correspondence Network", "doi": null, "abstractUrl": "/proceedings-article/wacv/2019/197500b034/18j8IJUYz1S", "parentPublication": { "id": "proceedings/wacv/2019/1975/0", "title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600g614", "title": "CaDeX: Learning Canonical Deformation Coordinate Space for Dynamic Surface Representation via Neural Homeomorphism", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600g614/1H1j6MnwMo0", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2022/5670/0/567000a032", "title": "Spectral Teacher for a Spatial Student: Spectrum-Aware Real-Time Dense Shape Correspondence", "doi": null, "abstractUrl": "/proceedings-article/3dv/2022/567000a032/1KYsxkE8paM", "parentPublication": { "id": "proceedings/3dv/2022/5670/0", "title": "2022 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300k0936", "title": "Boosting Local Shape Matching for Dense 3D Face 
Correspondence", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300k0936/1gyrj1AnG6Y", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093482", "title": "Geometric Image Correspondence Verification by Dense Pixel Matching", "doi": null, "abstractUrl": "/proceedings-article/wacv/2020/09093482/1jPbB7Bbc40", "parentPublication": { "id": "proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/10/09462361", "title": "A Sparse and Locally Coherent Morphable Face Model for Dense Semantic Correspondence Across Heterogeneous 3D Faces", "doi": null, "abstractUrl": "/journal/tp/2022/10/09462361/1uDSvKpKHG8", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900k0281", "title": "Deformed Implicit Field: Modeling 3D Shapes with Learned Dense Correspondence", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900k0281/1yeJP8p8pgI", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900b450", "title": "Neural Deformation Graphs for Globally-consistent Non-rigid Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900b450/1yeJlVNk3bW", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern 
Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKirr", "title": "Computer Science and Software Engineering, International Conference on", "acronym": "csse", "groupId": "1002553", "volume": "1", "displayVolume": "1", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNAlvHx2", "doi": "10.1109/CSSE.2008.1332", "title": "Manifold Alignment via Local Tangent Space Alignment", "normalizedTitle": "Manifold Alignment via Local Tangent Space Alignment", "abstract": "Manifold alignment is about mapping several datasets into a global space, and is of great importance in learning the shared latent structure, data fusion and multicue data matching. In this paper, we propose an algorithm to solve this problem via Local Tangent Space Alignment (LTSA). LTSA is used here as a method to find the inner manifold constraint of each dataset. A cost function to measure the quality of alignment is given by combining the inner manifold constraints of each dataset and the matching points constraints among different datasets. The effectiveness of our algorithm is validated by applying it to the problem of image sequences alignment.", "abstracts": [ { "abstractType": "Regular", "content": "Manifold alignment is about mapping several datasets into a global space, and is of great importance in learning the shared latent structure, data fusion and multicue data matching. In this paper, we propose an algorithm to solve this problem via Local Tangent Space Alignment (LTSA). LTSA is used here as a method to find the inner manifold constraint of each dataset. A cost function to measure the quality of alignment is given by combining the inner manifold constraints of each dataset and the matching points constraints among different datasets. 
The effectiveness of our algorithm is validated by applying it to the problem of image sequences alignment.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Manifold alignment is about mapping several datasets into a global space, and is of great importance in learning the shared latent structure, data fusion and multicue data matching. In this paper, we propose an algorithm to solve this problem via Local Tangent Space Alignment (LTSA). LTSA is used here as a method to find the inner manifold constraint of each dataset. A cost function to measure the quality of alignment is given by combining the inner manifold constraints of each dataset and the matching points constraints among different datasets. The effectiveness of our algorithm is validated by applying it to the problem of image sequences alignment.", "fno": "3336a928", "keywords": [ "Manifold Alignment", "LTSA", "Data Matching" ], "authors": [ { "affiliation": null, "fullName": "Gelan Yang", "givenName": "Gelan", "surname": "Yang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Xue Xu", "givenName": "Xue", "surname": "Xu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jianming Zhang", "givenName": "Jianming", "surname": "Zhang", "__typename": "ArticleAuthorType" } ], "idPrefix": "csse", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-12-01T00:00:00", "pubType": "proceedings", "pages": "928-931", "year": "2008", "issn": null, "isbn": "978-0-7695-3336-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3336a923", "articleId": "12OmNAoDicj", "__typename": "AdjacentArticleType" }, "next": { "fno": "3336a932", "articleId": "12OmNwpoFMs", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2013/2840/0/2840a129", "title": 
"Unsupervised Random Forest Manifold Alignment for Lipreading", "doi": null, "abstractUrl": "/proceedings-article/iccv/2013/2840a129/12OmNCvcLIR", "parentPublication": { "id": "proceedings/iccv/2013/2840/0", "title": "2013 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460122", "title": "Local tangent space based manifold entropy for image retrieval", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460122/12OmNvFpEvK", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icci/2005/9136/0/01532627", "title": "Dimension reduction of microarray data based on local tangent space alignment", "doi": null, "abstractUrl": "/proceedings-article/icci/2005/01532627/12OmNvHY2EL", "parentPublication": { "id": "proceedings/icci/2005/9136/0", "title": "Proceedings of the Fourth IEEE International Conference on Cognitive Informatics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdma/2012/4772/0/4772a046", "title": "A Fault Feature Extraction Method for Machine Health Diagnosis Using Manifold Learning", "doi": null, "abstractUrl": "/proceedings-article/icdma/2012/4772a046/12OmNwDACpN", "parentPublication": { "id": "proceedings/icdma/2012/4772/0", "title": "2012 Third International Conference on Digital Manufacturing & Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2011/0394/0/05995583", "title": "Graph matching through entropic manifold alignment", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2011/05995583/12OmNx8wTuA", "parentPublication": { "id": "proceedings/cvpr/2011/0394/0", "title": "CVPR 2011", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmens/2009/3938/0/3938a105", "title": "Manifold Alignment by Scalable Constraints of the Point Clouds", "doi": null, "abstractUrl": "/proceedings-article/icmens/2009/3938a105/12OmNxwWotX", "parentPublication": { "id": "proceedings/icmens/2009/3938/0", "title": "MEMS, NANO, and Smart Systems, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dicta/2011/4588/0/4588a614", "title": "Image Matting via Local Tangent Space Alignment", "doi": null, "abstractUrl": "/proceedings-article/dicta/2011/4588a614/12OmNxzuMOJ", "parentPublication": { "id": "proceedings/dicta/2011/4588/0", "title": "2011 International Conference on Digital Image Computing: Techniques and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wkdd/2009/3543/0/3543a697", "title": "Manifold Alignment via Local Block Coordinate", "doi": null, "abstractUrl": "/proceedings-article/wkdd/2009/3543a697/12OmNy2Jt3l", "parentPublication": { "id": "proceedings/wkdd/2009/3543/0", "title": "2009 Second International Workshop on Knowledge Discovery and Data Mining. 
WKDD 2009", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2013/03/ttp2013030697", "title": "Spatiotemporal Alignment of Visual Signals on a Special Manifold", "doi": null, "abstractUrl": "/journal/tp/2013/03/ttp2013030697/13rRUwbs227", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2012/08/06095564", "title": "Unsupervised Image Matching Based on Manifold Alignment", "doi": null, "abstractUrl": "/journal/tp/2012/08/06095564/13rRUygT7gx", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNy9Prj1", "title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)", "acronym": "iccvw", "groupId": "1800041", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNvkpl1O", "doi": "10.1109/ICCVW.2017.213", "title": "Manifold Constrained Low-Rank Decomposition", "normalizedTitle": "Manifold Constrained Low-Rank Decomposition", "abstract": "Low-rank decomposition (LRD) is a state-of-the-art method for visual data reconstruction and modelling. However, it is a very challenging problem when the image data contains significant occlusion, noise, illumination variation, and misalignment from rotation or viewpoint changes. We leverage the specific structure of data in order to improve the performance of LRD when the data are not ideal. To this end, we propose a new framework that embeds manifold priors into LRD. To implement the framework, we design an alternating direction method of multipliers (ADMM) method which efficiently integrates the manifold constraints during the optimization process. The proposed approach is successfully used to calculate low-rank models from face images, hand-written digits and planar surface images. The results show a consistent increase of performance when compared to the state-of-the-art over a wide range of realistic image misalignments and corruptions.", "abstracts": [ { "abstractType": "Regular", "content": "Low-rank decomposition (LRD) is a state-of-the-art method for visual data reconstruction and modelling. However, it is a very challenging problem when the image data contains significant occlusion, noise, illumination variation, and misalignment from rotation or viewpoint changes. We leverage the specific structure of data in order to improve the performance of LRD when the data are not ideal. To this end, we propose a new framework that embeds manifold priors into LRD. 
To implement the framework, we design an alternating direction method of multipliers (ADMM) method which efficiently integrates the manifold constraints during the optimization process. The proposed approach is successfully used to calculate low-rank models from face images, hand-written digits and planar surface images. The results show a consistent increase of performance when compared to the state-of-the-art over a wide range of realistic image misalignments and corruptions.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Low-rank decomposition (LRD) is a state-of-the-art method for visual data reconstruction and modelling. However, it is a very challenging problem when the image data contains significant occlusion, noise, illumination variation, and misalignment from rotation or viewpoint changes. We leverage the specific structure of data in order to improve the performance of LRD when the data are not ideal. To this end, we propose a new framework that embeds manifold priors into LRD. To implement the framework, we design an alternating direction method of multipliers (ADMM) method which efficiently integrates the manifold constraints during the optimization process. The proposed approach is successfully used to calculate low-rank models from face images, hand-written digits and planar surface images. 
The results show a consistent increase of performance when compared to the state-of-the-art over a wide range of realistic image misalignments and corruptions.", "fno": "1034b800", "keywords": [ "Data Structures", "Face Recognition", "Image Reconstruction", "LRD", "State Of The Art Method", "Visual Data Reconstruction", "Illumination Variation", "Low Rank Models", "Face Images", "Planar Surface Images", "Realistic Image Misalignments", "Manifold Constrained Low Rank Decomposition", "Alternating Direction Method Of Multipliers Method", "Data Structure", "ADMM Method", "Optimization Process", "Hand Written Digits", "Manifolds", "Optimization", "Data Models", "Videos", "Computer Vision", "Matrix Decomposition", "Transmission Line Matrix Methods" ], "authors": [ { "affiliation": "Center for Research in Computer Vision (CRCV), University of Central Florida (UCF)", "fullName": "Chen Chen", "givenName": "Chen", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Automation Science and Electrical Engineering, Beihang University, Beijing, China", "fullName": "Baochang Zhang", "givenName": "Baochang", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "Istituto Italiano di Tecnologia, Genova, Italy", "fullName": "Alessio Del Bue", "givenName": "Alessio", "surname": "Del Bue", "__typename": "ArticleAuthorType" }, { "affiliation": "Istituto Italiano di Tecnologia, Genova, Italy", "fullName": "Vittorio Murino", "givenName": "Vittorio", "surname": "Murino", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccvw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-10-01T00:00:00", "pubType": "proceedings", "pages": "1800-1808", "year": "2017", "issn": "2473-9944", "isbn": "978-1-5386-1034-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "1034b792", "articleId": "12OmNyrqzk9", 
"__typename": "AdjacentArticleType" }, "next": { "fno": "1034b809", "articleId": "12OmNxaNGkt", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccvw/2011/0063/0/06130473", "title": "Tracking visual and infrared objects using joint Riemannian manifold appearance and affine shape modeling", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2011/06130473/12OmNC3FGba", "parentPublication": { "id": "proceedings/iccvw/2011/0063/0", "title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/5209a744", "title": "Ensemble Manifold Structured Low Rank Approximation for Data Representation", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209a744/12OmNqEjhWS", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2011/4408/0/4408b266", "title": "Low Rank Metric Learning with Manifold Regularization", "doi": null, "abstractUrl": "/proceedings-article/icdm/2011/4408b266/12OmNqGRGiH", "parentPublication": { "id": "proceedings/icdm/2011/4408/0", "title": "2011 IEEE 11th International Conference on Data Mining", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2013/07/ttp2013071717", "title": "Low-Rank Matrix Approximation with Manifold Regularization", "doi": null, "abstractUrl": "/journal/tp/2013/07/ttp2013071717/13rRUyYBlhN", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2022/04/09893338", "title": "Automatic Estimation of Self-Reported 
Pain by Trajectory Analysis in the Manifold of Fixed Rank Positive Semi-Definite Matrices", "doi": null, "abstractUrl": "/journal/ta/2022/04/09893338/1GGLajz9tBe", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/5555/01/09966832", "title": "Tangent Space Based Alternating Projections for Nonnegative Low Rank Matrix Approximation", "doi": null, "abstractUrl": "/journal/tk/5555/01/09966832/1IIYdPVtZv2", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2019/9552/0/955200b348", "title": "Multi-view Clustering via Simultaneously Learning Graph Regularized Low-Rank Tensor Representation and Affinity Matrix", "doi": null, "abstractUrl": "/proceedings-article/icme/2019/955200b348/1cdOJijqQMM", "parentPublication": { "id": "proceedings/icme/2019/9552/0", "title": "2019 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800n3773", "title": "Estimating Low-Rank Region Likelihood Maps", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800n3773/1m3nAEILgVG", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2021/8808/0/09412987", "title": "A Spectral Clustering on Grassmann Manifold via Double Low Rank Constraint", "doi": null, "abstractUrl": "/proceedings-article/icpr/2021/09412987/1tmiCQxgKE8", "parentPublication": { "id": "proceedings/icpr/2021/8808/0", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2021/8808/0/09412242", "title": "Low Rank Representation on Product Grassmann Manifolds for Multi-view Subspace Clustering", "doi": null, "abstractUrl": "/proceedings-article/icpr/2021/09412242/1tmikTAVIYM", "parentPublication": { "id": "proceedings/icpr/2021/8808/0", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBTawmY", "title": "2012 25th SIBGRAPI Conference on Graphics, Patterns and Images", "acronym": "sibgrapi", "groupId": "1000131", "volume": "0", "displayVolume": "0", "year": "2012", "__typename": "ProceedingType" }, "article": { "id": "12OmNwoPtyX", "doi": "10.1109/SIBGRAPI.2012.30", "title": "Invariance for Single Curved Manifold", "normalizedTitle": "Invariance for Single Curved Manifold", "abstract": "Recently, it has been shown that, for Lambert illumination model, solely scenes composed by developable objects with a very particular albedo distribution produce an (2D) image with isolines that are (almost) invariant to light direction change. In this work, we provide and investigate a more general framework, and we show that, in general, the requirement for such in variances is quite strong, and is related to the differential geometry of the objects. More precisely, it is proved that single curved manifolds, i.e., manifolds such that at each point there is at most one principal curvature direction, produce invariant is surfaces for a certain relevant family of energy functions. In the three-dimensional case, the associated energy function corresponds to the classical Lambert illumination model with albedo. This result is also extended for finite-dimensional scenes composed by single curved objects.", "abstracts": [ { "abstractType": "Regular", "content": "Recently, it has been shown that, for Lambert illumination model, solely scenes composed by developable objects with a very particular albedo distribution produce an (2D) image with isolines that are (almost) invariant to light direction change. In this work, we provide and investigate a more general framework, and we show that, in general, the requirement for such in variances is quite strong, and is related to the differential geometry of the objects. 
More precisely, it is proved that single curved manifolds, i.e., manifolds such that at each point there is at most one principal curvature direction, produce invariant is surfaces for a certain relevant family of energy functions. In the three-dimensional case, the associated energy function corresponds to the classical Lambert illumination model with albedo. This result is also extended for finite-dimensional scenes composed by single curved objects.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Recently, it has been shown that, for Lambert illumination model, solely scenes composed by developable objects with a very particular albedo distribution produce an (2D) image with isolines that are (almost) invariant to light direction change. In this work, we provide and investigate a more general framework, and we show that, in general, the requirement for such in variances is quite strong, and is related to the differential geometry of the objects. More precisely, it is proved that single curved manifolds, i.e., manifolds such that at each point there is at most one principal curvature direction, produce invariant is surfaces for a certain relevant family of energy functions. In the three-dimensional case, the associated energy function corresponds to the classical Lambert illumination model with albedo. 
This result is also extended for finite-dimensional scenes composed by single curved objects.", "fno": "4829a158", "keywords": [ "Manifolds", "Isosurfaces", "Vectors", "Lighting", "Geometry", "Mathematical Model", "Surface Treatment", "Developable Surface", "Invariance", "Pattern Recognition" ], "authors": [ { "affiliation": null, "fullName": "Pedro Machado Manhaes de Castro", "givenName": "Pedro Machado Manhaes de", "surname": "Castro", "__typename": "ArticleAuthorType" } ], "idPrefix": "sibgrapi", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2012-08-01T00:00:00", "pubType": "proceedings", "pages": "158-165", "year": "2012", "issn": "1530-1834", "isbn": "978-1-4673-2802-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4829a150", "articleId": "12OmNCdBDWk", "__typename": "AdjacentArticleType" }, "next": { "fno": "4829a166", "articleId": "12OmNylsZPB", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/1990/2057/0/00139604", "title": "Invariance-a new framework for vision", "doi": null, "abstractUrl": "/proceedings-article/iccv/1990/00139604/12OmNBEGYHl", "parentPublication": { "id": "proceedings/iccv/1990/2057/0", "title": "Proceedings Third International Conference on Computer Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2012/1611/0/06239199", "title": "Manifold-based fingerprinting for target identification", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2012/06239199/12OmNBhHtj6", "parentPublication": { "id": "proceedings/cvprw/2012/1611/0", "title": "2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/1990/2057/0/00139537", 
"title": "Representing surface curvature discontinuities on curved surfaces", "doi": null, "abstractUrl": "/proceedings-article/iccv/1990/00139537/12OmNvT2peK", "parentPublication": { "id": "proceedings/iccv/1990/2057/0", "title": "Proceedings Third International Conference on Computer Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2009/3992/0/05206841", "title": "3D reconstruction of curved objects from single 2D line drawings", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2009/05206841/12OmNwwuDPa", "parentPublication": { "id": "proceedings/cvpr/2009/3992/0", "title": "2009 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bmei/2008/3118/1/3118a845", "title": "A Robust Feature-Based Method for Mosaic of the Curved Human Color Retinal Images", "doi": null, "abstractUrl": "/proceedings-article/bmei/2008/3118a845/12OmNwwuDXb", "parentPublication": { "id": "proceedings/bmei/2008/3118/1", "title": "2008 International Conference on Biomedical Engineering and Informatics (BMEI 2008)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi-t/2017/0619/0/0619a042", "title": "Geometric Data Analysis Based on Manifold Learning with Applications for Image Understanding", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi-t/2017/0619a042/12OmNx8Ounr", "parentPublication": { "id": "proceedings/sibgrapi-t/2017/0619/0", "title": "2017 30th SIBGRAPI Conference on Graphics, Patterns and Images Tutorials (SIBGRAPI-T)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2002/12/i1579", "title": "Identifying Faces in a 2D Line Drawing Representing a Manifold Object", "doi": null, "abstractUrl": "/journal/tp/2002/12/i1579/13rRUwhHcRL", "parentPublication": { "id": "trans/tp", "title": "IEEE 
Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/09/07558244", "title": "Knot Optimization for Biharmonic B-splines on Manifold Triangle Meshes", "doi": null, "abstractUrl": "/journal/tg/2017/09/07558244/13rRUxly95G", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2017/10/07984849", "title": "Manifold Learning by Curved Cosine Mapping", "doi": null, "abstractUrl": "/journal/tk/2017/10/07984849/13rRUygT7nn", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2021/3176/0/09667017", "title": "Deep Parametric Surfaces for 3D Outfit Reconstruction from Single View Image", "doi": null, "abstractUrl": "/proceedings-article/fg/2021/09667017/1A6BEk9xRaU", "parentPublication": { "id": "proceedings/fg/2021/3176/0", "title": "2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNx5GU2w", "title": "2013 IEEE International Conference on High Performance Computing and Communications (HPCC) & 2013 IEEE International Conference on Embedded and Ubiquitous Computing (EUC)", "acronym": "hpcc-euc", "groupId": "1002461", "volume": "0", "displayVolume": "0", "year": "2013", "__typename": "ProceedingType" }, "article": { "id": "12OmNx965wy", "doi": "10.1109/HPCC.and.EUC.2013.242", "title": "A New Algorithm for Repairing Non-manifold Surfaces", "normalizedTitle": "A New Algorithm for Repairing Non-manifold Surfaces", "abstract": "In the process of solid modeling, non-manifold polygon surfaces may be met frequently. However, most graphics algorithms applied on polygon surfaces require that polygon surfaces must be manifold. Generally, non-manifold surfaces can be transferred into manifold surfaces with similar geometric appearance. The transfer operations include modifying non-manifold surface edges and non-manifold surface vertices. However, new nonmanifold may be produced when being transferred from nonmanifold surface in mesh modeling. To avoid of generating nonmanifold, we propose a new method of non-manifold transferring algorithm. Compared with existed non-manifold surface algorithm, this new repairing algorithm would not produce new non-manifold surfaces after transferring and can be operated and used directly and conveniently.", "abstracts": [ { "abstractType": "Regular", "content": "In the process of solid modeling, non-manifold polygon surfaces may be met frequently. However, most graphics algorithms applied on polygon surfaces require that polygon surfaces must be manifold. Generally, non-manifold surfaces can be transferred into manifold surfaces with similar geometric appearance. The transfer operations include modifying non-manifold surface edges and non-manifold surface vertices. However, new nonmanifold may be produced when being transferred from nonmanifold surface in mesh modeling. 
To avoid of generating nonmanifold, we propose a new method of non-manifold transferring algorithm. Compared with existed non-manifold surface algorithm, this new repairing algorithm would not produce new non-manifold surfaces after transferring and can be operated and used directly and conveniently.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In the process of solid modeling, non-manifold polygon surfaces may be met frequently. However, most graphics algorithms applied on polygon surfaces require that polygon surfaces must be manifold. Generally, non-manifold surfaces can be transferred into manifold surfaces with similar geometric appearance. The transfer operations include modifying non-manifold surface edges and non-manifold surface vertices. However, new nonmanifold may be produced when being transferred from nonmanifold surface in mesh modeling. To avoid of generating nonmanifold, we propose a new method of non-manifold transferring algorithm. Compared with existed non-manifold surface algorithm, this new repairing algorithm would not produce new non-manifold surfaces after transferring and can be operated and used directly and conveniently.", "fno": "06832124", "keywords": [ "Manifolds", "Surface Treatment", "Solid Modeling", "Data Structures", "Graphics", "Design Automation", "Surface Cracks", "Repairing Algorithm", "Surface Modeling", "Non Manifold" ], "authors": [ { "affiliation": null, "fullName": "Yaoping Fei", "givenName": "Yaoping", "surname": "Fei", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Songqiao Chen", "givenName": "Songqiao", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Dan Su", "givenName": "Dan", "surname": "Su", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jianping Luo", "givenName": "Jianping", "surname": "Luo", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Min Li", "givenName": "Min", 
"surname": "Li", "__typename": "ArticleAuthorType" } ], "idPrefix": "hpcc-euc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2013-11-01T00:00:00", "pubType": "proceedings", "pages": "1704-1708", "year": "2013", "issn": null, "isbn": "978-0-7695-5088-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06832123", "articleId": "12OmNBhpS94", "__typename": "AdjacentArticleType" }, "next": { "fno": "06832125", "articleId": "12OmNzsJ7rU", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cgiv/2005/2392/0/23920257", "title": "Adaptive Polygonisation of Non-Manifold Implicit Surfaces", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2005/23920257/12OmNAq3hTA", "parentPublication": { "id": "proceedings/cgiv/2005/2392/0", "title": "International Conference on Computer Graphics, Imaging and Visualization (CGIV'05)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pg/2002/1784/0/17840475", "title": "Interactive Visualization of Non-Manifold Implicit Surfaces Using Pre-Integrated Volume Rendering", "doi": null, "abstractUrl": "/proceedings-article/pg/2002/17840475/12OmNBqv2go", "parentPublication": { "id": "proceedings/pg/2002/1784/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/01532824", "title": "Reconstructing manifold and non-manifold surfaces from point clouds", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/01532824/12OmNrkT7FS", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/cgiv/2010/4166/0/4166a026", "title": "Polygonisation of Non-manifold Implicit Surfaces Using a Dual Grid and Points", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2010/4166a026/12OmNwDACjb", "parentPublication": { "id": "proceedings/cgiv/2010/4166/0", "title": "2010 Seventh International Conference on Computer Graphics, Imaging and Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/gmp/2002/1674/0/16740138", "title": "Non-Manifold Implicit Surfaces Based on Discontinuous Implicitization and Polygonization", "doi": null, "abstractUrl": "/proceedings-article/gmp/2002/16740138/12OmNwbLVmQ", "parentPublication": { "id": "proceedings/gmp/2002/1674/0", "title": "Geometric Modeling and Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2017/2219/0/2219a039", "title": "Repairing Non-Manifold Boundaries of Segmented Simplicial Meshes", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2017/2219a039/12OmNxWcHfu", "parentPublication": { "id": "proceedings/sibgrapi/2017/2219/0", "title": "2017 30th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/27660053", "title": "Reconstructing Manifold and Non-Manifold Surfaces from Point Clouds", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/27660053/12OmNxbmSzt", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/1998/9176/0/91760383", "title": "Converting Sets of Polygons to Manifold Surfaces by Cutting and Stitching", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1998/91760383/12OmNzUPpvx", "parentPublication": { "id": 
"proceedings/ieee-vis/1998/9176/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2001/02/v0136", "title": "Cutting and Stitching: Converting Sets of Polygons to Manifold Surfaces", "doi": null, "abstractUrl": "/journal/tg/2001/02/v0136/13rRUwI5UfR", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv-2/2019/2850/0/285000a160", "title": "Hybrid Polygon-Point Rendering of Singular and Non-Manifold Implicit Surfaces", "doi": null, "abstractUrl": "/proceedings-article/iv-2/2019/285000a160/1cMEQnNfRXG", "parentPublication": { "id": "proceedings/iv-2/2019/2850/0", "title": "2019 23rd International Conference in Information Visualization – Part II", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNCbCrVT", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNy3RRIr", "doi": "10.1109/CVPR.2014.497", "title": "Active Flattening of Curved Document Images via Two Structured Beams", "normalizedTitle": "Active Flattening of Curved Document Images via Two Structured Beams", "abstract": "Document images captured by a digital camera often suffer from serious geometric distortions. In this paper, we propose an active method to correct geometric distortions in a camera-captured document image. Unlike many passive rectification methods that rely on text-lines or features extracted from images, our method uses two structured beams illuminating upon the document page to recover two spatial curves. A developable surface is then interpolated to the curves by finding the correspondence between them. The developable surface is finally flattened onto a plane by solving a system of ordinary differential equations. Our method is a content independent approach and can restore a corrected document image of high accuracy with undistorted contents. Experimental results on a variety of real-captured document images demonstrate the effectiveness and efficiency of the proposed method.", "abstracts": [ { "abstractType": "Regular", "content": "Document images captured by a digital camera often suffer from serious geometric distortions. In this paper, we propose an active method to correct geometric distortions in a camera-captured document image. Unlike many passive rectification methods that rely on text-lines or features extracted from images, our method uses two structured beams illuminating upon the document page to recover two spatial curves. A developable surface is then interpolated to the curves by finding the correspondence between them. 
The developable surface is finally flattened onto a plane by solving a system of ordinary differential equations. Our method is a content independent approach and can restore a corrected document image of high accuracy with undistorted contents. Experimental results on a variety of real-captured document images demonstrate the effectiveness and efficiency of the proposed method.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Document images captured by a digital camera often suffer from serious geometric distortions. In this paper, we propose an active method to correct geometric distortions in a camera-captured document image. Unlike many passive rectification methods that rely on text-lines or features extracted from images, our method uses two structured beams illuminating upon the document page to recover two spatial curves. A developable surface is then interpolated to the curves by finding the correspondence between them. The developable surface is finally flattened onto a plane by solving a system of ordinary differential equations. Our method is a content independent approach and can restore a corrected document image of high accuracy with undistorted contents. 
Experimental results on a variety of real-captured document images demonstrate the effectiveness and efficiency of the proposed method.", "fno": "5118d890", "keywords": [ "Three Dimensional Displays", "Laser Beams", "Shape", "Digital Cameras", "Vectors", "Surface Treatment", "Estimation", "Developable Surface Interpolation", "Document Image Processing", "Geometric Rectification", "Structured Beams" ], "authors": [ { "affiliation": null, "fullName": "Gaofeng Meng", "givenName": "Gaofeng", "surname": "Meng", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Ying Wang", "givenName": "Ying", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Shenquan Qu", "givenName": "Shenquan", "surname": "Qu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Shiming Xiang", "givenName": "Shiming", "surname": "Xiang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Chunhong Pan", "givenName": "Chunhong", "surname": "Pan", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-06-01T00:00:00", "pubType": "proceedings", "pages": "3890-3897", "year": "2014", "issn": "1063-6919", "isbn": "978-1-4799-5118-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "5118d882", "articleId": "12OmNyL0TtT", "__typename": "AdjacentArticleType" }, "next": { "fno": "5118d898", "articleId": "12OmNA0MZ6e", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icpr/2006/2521/1/252110971", "title": "Document Flattening through Grid Modeling and Regularization", "doi": null, "abstractUrl": "/proceedings-article/icpr/2006/252110971/12OmNAYoKge", "parentPublication": { "id": "proceedings/icpr/2006/2521/1", "title": "Pattern Recognition, 
International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2012/4829/0/4829a158", "title": "Invariance for Single Curved Manifold", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2012/4829a158/12OmNwoPtyX", "parentPublication": { "id": "proceedings/sibgrapi/2012/4829/0", "title": "2012 25th SIBGRAPI Conference on Graphics, Patterns and Images", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2011/0394/0/05995540", "title": "Rectification and 3D reconstruction of curved document images", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2011/05995540/12OmNy50giP", "parentPublication": { "id": "proceedings/cvpr/2011/0394/0", "title": "CVPR 2011", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/2009/3725/0/3725a226", "title": "Locally Developable Constraint for Document Surface Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/icdar/2009/3725a226/12OmNyrIaCe", "parentPublication": { "id": "proceedings/icdar/2009/3725/0", "title": "2009 10th International Conference on Document Analysis and Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2012/04/ttp2012040707", "title": "Metric Rectification of Curved Document Images", "doi": null, "abstractUrl": "/journal/tp/2012/04/ttp2012040707/13rRUwjoNyl", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2009/03/ttg2009030518", "title": "Quasi-Developable Mesh Surface Interpolation via Mesh Deformation", "doi": null, "abstractUrl": "/journal/tg/2009/03/ttg2009030518/13rRUxjQybN", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2018/02/07866848", "title": "Multiview Rectification of Folded Documents", "doi": null, "abstractUrl": "/journal/tp/2018/02/07866848/13rRUytWFas", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2020/04/08576546", "title": "Baselines Extraction from Curved Document Images via Slope Fields Recovery", "doi": null, "abstractUrl": "/journal/tp/2020/04/08576546/17D45WaTkmA", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2019/1838/0/183800a033", "title": "Rectification of Camera-Captured Document Images with Mixed Contents and Varied Layouts", "doi": null, "abstractUrl": "/proceedings-article/crv/2019/183800a033/1cMGurwZqKY", "parentPublication": { "id": "proceedings/crv/2019/1838/0", "title": "2019 16th Conference on Computer and Robot Vision (CRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1B12DGrwoyQ", "title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "acronym": "wacv", "groupId": "1000040", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1B13oiDeNsQ", "doi": "10.1109/WACV51458.2022.00173", "title": "Generalized Clustering and Multi-Manifold Learning with Geometric Structure Preservation", "normalizedTitle": "Generalized Clustering and Multi-Manifold Learning with Geometric Structure Preservation", "abstract": "Though manifold-based clustering has become a popular research topic, we observe that one important factor has been omitted by these works, namely that the defined clustering loss may corrupt the local and global structure of the latent space. In this paper, we propose a novel Generalized Clustering and Multi-manifold Learning (GCML) framework with geometric structure preservation for generalized data, i.e., not limited to 2-D image data and has a wide range of applications in speech, text, and biology domains. In the proposed framework, manifold clustering is done in the latent space guided by a clustering loss. To overcome the problem that the clustering-oriented loss may deteriorate the geometric structure of the latent space, an isometric loss is proposed for preserving intra-manifold structure locally and a ranking loss for inter-manifold structure globally. Extensive experimental results have shown that GCML exhibits superior performance to counterparts in terms of qualitative visualizations and quantitative metrics, which demonstrates the effectiveness of preserving geometric structure. 
Code has been made available at: https://github.com/LirongWu/GCML.", "abstracts": [ { "abstractType": "Regular", "content": "Though manifold-based clustering has become a popular research topic, we observe that one important factor has been omitted by these works, namely that the defined clustering loss may corrupt the local and global structure of the latent space. In this paper, we propose a novel Generalized Clustering and Multi-manifold Learning (GCML) framework with geometric structure preservation for generalized data, i.e., not limited to 2-D image data and has a wide range of applications in speech, text, and biology domains. In the proposed framework, manifold clustering is done in the latent space guided by a clustering loss. To overcome the problem that the clustering-oriented loss may deteriorate the geometric structure of the latent space, an isometric loss is proposed for preserving intra-manifold structure locally and a ranking loss for inter-manifold structure globally. Extensive experimental results have shown that GCML exhibits superior performance to counterparts in terms of qualitative visualizations and quantitative metrics, which demonstrates the effectiveness of preserving geometric structure. Code has been made available at: https://github.com/LirongWu/GCML.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Though manifold-based clustering has become a popular research topic, we observe that one important factor has been omitted by these works, namely that the defined clustering loss may corrupt the local and global structure of the latent space. In this paper, we propose a novel Generalized Clustering and Multi-manifold Learning (GCML) framework with geometric structure preservation for generalized data, i.e., not limited to 2-D image data and has a wide range of applications in speech, text, and biology domains. In the proposed framework, manifold clustering is done in the latent space guided by a clustering loss. 
To overcome the problem that the clustering-oriented loss may deteriorate the geometric structure of the latent space, an isometric loss is proposed for preserving intra-manifold structure locally and a ranking loss for inter-manifold structure globally. Extensive experimental results have shown that GCML exhibits superior performance to counterparts in terms of qualitative visualizations and quantitative metrics, which demonstrates the effectiveness of preserving geometric structure. Code has been made available at: https://github.com/LirongWu/GCML.", "fno": "091500b668", "keywords": [ "Image Processing", "Learning Artificial Intelligence", "Pattern Clustering", "Local Structure", "Global Structure", "Latent Space", "Geometric Structure Preservation", "Generalized Data", "2 D Image Data", "Manifold Clustering", "Clustering Oriented Loss", "Isometric Loss", "Intra Manifold Structure", "Ranking Loss", "Inter Manifold Structure", "Manifold Based Clustering", "Generalized Clustering And Multimanifold Learning Framework", "Manifolds", "Measurement", "Computer Vision", "Codes", "Clustering Algorithms", "Biology", "Deep Learning Clustering" ], "authors": [ { "affiliation": "Zhejiang University", "fullName": "Lirong Wu", "givenName": "Lirong", "surname": "Wu", "__typename": "ArticleAuthorType" }, { "affiliation": "Westlake University", "fullName": "Zicheng Liu", "givenName": "Zicheng", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": "Westlake University", "fullName": "Jun Xia", "givenName": "Jun", "surname": "Xia", "__typename": "ArticleAuthorType" }, { "affiliation": "Westlake University", "fullName": "Zelin Zang", "givenName": "Zelin", "surname": "Zang", "__typename": "ArticleAuthorType" }, { "affiliation": "Westlake University", "fullName": "Siyuan Li", "givenName": "Siyuan", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": "Westlake University", "fullName": "Stan Z. 
Li", "givenName": "Stan Z.", "surname": "Li", "__typename": "ArticleAuthorType" } ], "idPrefix": "wacv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-01-01T00:00:00", "pubType": "proceedings", "pages": "1668-1676", "year": "2022", "issn": null, "isbn": "978-1-6654-0915-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1B13ofzCKzK", "name": "pwacv202209150-09706902s1-mm_091500b668.zip", "size": "197 kB", "location": "https://www.computer.org/csdl/api/v1/extra/pwacv202209150-09706902s1-mm_091500b668.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "091500b658", "articleId": "1B13N150tHO", "__typename": "AdjacentArticleType" }, "next": { "fno": "091500b677", "articleId": "1B12F4IJ8ME", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icde/2017/6543/0/6543a079", "title": "Clustering with Adaptive Manifold Structure Learning", "doi": null, "abstractUrl": "/proceedings-article/icde/2017/6543a079/12OmNAoDijI", "parentPublication": { "id": "proceedings/icde/2017/6543/0", "title": "2017 IEEE 33rd International Conference on Data Engineering (ICDE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457d145", "title": "Grassmannian Manifold Optimization Assisted Sparse Spectral Clustering", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457d145/12OmNClQ0AC", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2014/4302/0/4302b103", "title": "Multi-view Clustering via Multi-manifold Regularized Nonnegative Matrix Factorization", "doi": null, "abstractUrl": 
"/proceedings-article/icdm/2014/4302b103/12OmNqBtiY8", "parentPublication": { "id": "proceedings/icdm/2014/4302/0", "title": "2014 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2017/11/07995073", "title": "Robust Dual Clustering with Adaptive Manifold Regularization", "doi": null, "abstractUrl": "/journal/tk/2017/11/07995073/13rRUy0qnGO", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2023/05/09693201", "title": "Local-to-Global Deep Clustering on Approximate Uniform Manifold", "doi": null, "abstractUrl": "/journal/tk/2023/05/09693201/1As6RGZBmsU", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200o4841", "title": "Manifold Alignment for Semantically Aligned Style Transfer", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200o4841/1BmKlLwp9y8", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdmw/2022/4609/0/460900a876", "title": "Data-driven Kernel Subspace Clustering with Local Manifold Preservation", "doi": null, "abstractUrl": "/proceedings-article/icdmw/2022/460900a876/1KBr6RsmA8g", "parentPublication": { "id": "proceedings/icdmw/2022/4609/0", "title": "2022 IEEE International Conference on Data Mining Workshops (ICDMW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2019/2506/0/250600a949", "title": "Filter Guided Manifold Optimization in the Autoencoder Latent Space", 
"doi": null, "abstractUrl": "/proceedings-article/cvprw/2019/250600a949/1iTvffvAnPq", "parentPublication": { "id": "proceedings/cvprw/2019/2506/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2021/8808/0/09413131", "title": "N2D: (Not Too) Deep Clustering via Clustering the Local Manifold of an Autoencoded Embedding", "doi": null, "abstractUrl": "/proceedings-article/icpr/2021/09413131/1tmizVx9ldu", "parentPublication": { "id": "proceedings/icpr/2021/8808/0", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2021/4899/0/489900e411", "title": "Learning low bending and low distortion manifold embeddings", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2021/489900e411/1yXsX4qLBNC", "parentPublication": { "id": "proceedings/cvprw/2021/4899/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNqGA5hy", "title": "High-Performance Computing in the Asia-Pacific Region, International Conference on", "acronym": "hpc", "groupId": "1000321", "volume": "2", "displayVolume": "3", "year": "2000", "__typename": "ProceedingType" }, "article": { "id": "12OmNBqdral", "doi": "10.1109/HPC.2000.843603", "title": "Across-Platform Computing of Groundwater and Adaptive Grid", "normalizedTitle": "Across-Platform Computing of Groundwater and Adaptive Grid", "abstract": "In this study, we provide a Web-based interface for numerical data representation based on WAN Wide Area Networking. Furthermore, we intend to give facilities to communicate with each module and make the package developing and maintaining easy. The system named Visual WIS (Water Information System) has been developed by JDK1.2. Through incorporating this Java-based tool into GMS, many groundwater problems can be solved and demonstrated in World Wide Web. A procedure to adjust the control functions, which was proposed by Yen and Kuo, is adopted for generating curvilinear grid nodes to specified locations of the study cases.", "abstracts": [ { "abstractType": "Regular", "content": "In this study, we provide a Web-based interface for numerical data representation based on WAN Wide Area Networking. Furthermore, we intend to give facilities to communicate with each module and make the package developing and maintaining easy. The system named Visual WIS (Water Information System) has been developed by JDK1.2. Through incorporating this Java-based tool into GMS, many groundwater problems can be solved and demonstrated in World Wide Web. A procedure to adjust the control functions, which was proposed by Yen and Kuo, is adopted for generating curvilinear grid nodes to specified locations of the study cases.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this study, we provide a Web-based interface for numerical data representation based on WAN Wide Area Networking. 
Furthermore, we intend to give facilities to communicate with each module and make the package developing and maintaining easy. The system named Visual WIS (Water Information System) has been developed by JDK1.2. Through incorporating this Java-based tool into GMS, many groundwater problems can be solved and demonstrated in World Wide Web. A procedure to adjust the control functions, which was proposed by Yen and Kuo, is adopted for generating curvilinear grid nodes to specified locations of the study cases.", "fno": "05891064", "keywords": [ "Wide Area Networking", "Use Orientation", "Across Platform", "And Visualization" ], "authors": [ { "affiliation": "National Center for High-Performance Computing", "fullName": "Chia-Chen Kuo", "givenName": "Chia-Chen", "surname": "Kuo", "__typename": "ArticleAuthorType" }, { "affiliation": "National Center for High-Performance Computing", "fullName": "Zhou-Jin Wu", "givenName": "Zhou-Jin", "surname": "Wu", "__typename": "ArticleAuthorType" }, { "affiliation": "National Center for High-Performance Computing", "fullName": "Whey-Fone Tsai", "givenName": "Whey-Fone", "surname": "Tsai", "__typename": "ArticleAuthorType" }, { "affiliation": "National Center for High-Performance Computing", "fullName": "Shung-Hui Chen", "givenName": "Shung-Hui", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": "National Center for High-Performance Computing", "fullName": "Ruey-Shyang Wu", "givenName": "Ruey-Shyang", "surname": "Wu", "__typename": "ArticleAuthorType" } ], "idPrefix": "hpc", "isOpenAccess": false, "showRecommendedArticles": false, "showBuyMe": true, "hasPdf": true, "pubDate": "2000-05-01T00:00:00", "pubType": "proceedings", "pages": "1064", "year": "2000", "issn": null, "isbn": "0-7695-0589-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05891058", "articleId": "12OmNzIl3CI", "__typename": "AdjacentArticleType" }, "next": { "fno": 
"05891070", "articleId": "12OmNvAiS8F", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxWcH14", "title": "2013 Ninth International Conference on Computational Intelligence and Security (CIS)", "acronym": "cis", "groupId": "1001517", "volume": "0", "displayVolume": "0", "year": "2013", "__typename": "ProceedingType" }, "article": { "id": "12OmNCcKQCf", "doi": "10.1109/CIS.2013.139", "title": "Feature Emphasized OLIC for 2D Flow Visualization", "normalizedTitle": "Feature Emphasized OLIC for 2D Flow Visualization", "abstract": "In order to emphasize features in flow textures while preserving the orientation of flows in 2D fields, a new method to generate flow textures with non-uniform streamlets is proposed in this paper. In the method, a control grid is built and divided into different regions that reflect the complexity of the underlying flow field. The resulting regions are then used to control to the distribution of droplets and the corresponding calculation of streamlets involved in OLIC. By this means, streamlets are dense around salient flow features, while in other areas they are sparse. The disparity of streamlets highlights features in flow fields. Test results show that our method can achieve pleasing visual effect. It is flexible and superior to the original OLIC.", "abstracts": [ { "abstractType": "Regular", "content": "In order to emphasize features in flow textures while preserving the orientation of flows in 2D fields, a new method to generate flow textures with non-uniform streamlets is proposed in this paper. In the method, a control grid is built and divided into different regions that reflect the complexity of the underlying flow field. The resulting regions are then used to control to the distribution of droplets and the corresponding calculation of streamlets involved in OLIC. By this means, streamlets are dense around salient flow features, while in other areas they are sparse. The disparity of streamlets highlights features in flow fields. 
Test results show that our method can achieve pleasing visual effect. It is flexible and superior to the original OLIC.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In order to emphasize features in flow textures while preserving the orientation of flows in 2D fields, a new method to generate flow textures with non-uniform streamlets is proposed in this paper. In the method, a control grid is built and divided into different regions that reflect the complexity of the underlying flow field. The resulting regions are then used to control to the distribution of droplets and the corresponding calculation of streamlets involved in OLIC. By this means, streamlets are dense around salient flow features, while in other areas they are sparse. The disparity of streamlets highlights features in flow fields. Test results show that our method can achieve pleasing visual effect. It is flexible and superior to the original OLIC.", "fno": "06746507", "keywords": [ "Streaming Media", "Complexity Theory", "Convolution", "Visualization", "Vectors", "Visual Effects", "Kernel", "Flow Visualization", "Features", "Textures", "OLIC" ], "authors": [ { "affiliation": null, "fullName": "Beichen Liu", "givenName": "Beichen", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Wenyao Zhang", "givenName": "Wenyao", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yueguang Wang", "givenName": "Yueguang", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yuezhu Pei", "givenName": "Yuezhu", "surname": "Pei", "__typename": "ArticleAuthorType" } ], "idPrefix": "cis", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2013-12-01T00:00:00", "pubType": "proceedings", "pages": "635-639", "year": "2013", "issn": null, "isbn": "978-1-4799-2549-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, 
"webExtras": [], "adjacentArticles": { "previous": { "fno": "06746506", "articleId": "12OmNviHKhz", "__typename": "AdjacentArticleType" }, "next": { "fno": "06746508", "articleId": "12OmNAS9zxr", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ieee-vis/2003/2030/0/20300017", "title": "Image Based Flow Visualization for Curved Surfaces", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2003/20300017/12OmNBE7MqI", "parentPublication": { "id": "proceedings/ieee-vis/2003/2030/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pg/2001/1227/0/12270355", "title": "Advecting Procedural Textures for 2D Flow Animation", "doi": null, "abstractUrl": "/proceedings-article/pg/2001/12270355/12OmNCgrCWy", "parentPublication": { "id": "proceedings/pg/2001/1227/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2011/935/0/05742376", "title": "View point evaluation and streamline filtering for flow visualization", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2011/05742376/12OmNqyDjoV", "parentPublication": { "id": "proceedings/pacificvis/2011/935/0", "title": "2011 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/imsccs/2007/3039/0/30390292", "title": "Enhanced Unsteady Flow Visualization", "doi": null, "abstractUrl": "/proceedings-article/imsccs/2007/30390292/12OmNxTmHJC", "parentPublication": { "id": "proceedings/imsccs/2007/3039/0", "title": "2007 Second International Multisymposium on Computer and Computational Sciences - IMSCCS '07", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/pacificvis/2014/2874/0/2874a041", "title": "Moment Invariants for 2D Flow Fields Using Normalization", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2014/2874a041/12OmNyRg4tb", "parentPublication": { "id": "proceedings/pacificvis/2014/2874/0", "title": "2014 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/08/06951493", "title": "Moment Invariants for 2D Flow Fields via Normalization in Detail", "doi": null, "abstractUrl": "/journal/tg/2015/08/06951493/13rRUwI5U2J", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/09/ttg2013091476", "title": "Image-Space Texture-Based Output-Coherent Surface Flow Visualization", "doi": null, "abstractUrl": "/journal/tg/2013/09/ttg2013091476/13rRUwghd98", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2012/05/mcg2012050012", "title": "2011 IEEE Visualization Contest Winner: Visualizing Unsteady Vortical Behavior of a Centrifugal Pump", "doi": null, "abstractUrl": "/magazine/cg/2012/05/mcg2012050012/13rRUyoPSRy", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dagstuhl/1997/0503/0/01423110", "title": "LIC For Surface Flow Feature Detection", "doi": null, "abstractUrl": "/proceedings-article/dagstuhl/1997/01423110/1h0N2BsyqDm", "parentPublication": { "id": "proceedings/dagstuhl/1997/0503/0", "title": "Dagstuhl '97 - Scientific Visualization Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/ismii/2021/1290/0/129000a252", "title": "Research on the visualization method of two dimensions underwater flow simulation", "doi": null, "abstractUrl": "/proceedings-article/ismii/2021/129000a252/1sZ2LY99qFy", "parentPublication": { "id": "proceedings/ismii/2021/1290/0", "title": "2021 7th International Symposium on Mechatronics and Industrial Informatics (ISMII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1uOujNlJyko", "title": "2021 International Conference on Computer Technology and Media Convergence Design (CTMCD)", "acronym": "ctmcd", "groupId": "1841984", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1uOukEGkOxW", "doi": "10.1109/CTMCD53128.2021.00046", "title": "The Mutual Development of Color Design and Application in New Media Design", "normalizedTitle": "The Mutual Development of Color Design and Application in New Media Design", "abstract": "New media design is also a kind of advertising design that conforms to The Times, so color is undoubtedly the most influential and appealing factor. It should be matched with different colors according to different themes. Color has the function of conveying information and expressing emotions. Mining the expression of advertising color, design a strong visual effect, with new works, clever use of color, eye-catching, harmonious, beautiful screen effect, so that the advertising more colorful, more can move the audience. Based on this, this paper discusses the mutual development of color design and application in new media design.", "abstracts": [ { "abstractType": "Regular", "content": "New media design is also a kind of advertising design that conforms to The Times, so color is undoubtedly the most influential and appealing factor. It should be matched with different colors according to different themes. Color has the function of conveying information and expressing emotions. Mining the expression of advertising color, design a strong visual effect, with new works, clever use of color, eye-catching, harmonious, beautiful screen effect, so that the advertising more colorful, more can move the audience. 
Based on this, this paper discusses the mutual development of color design and application in new media design.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "New media design is also a kind of advertising design that conforms to The Times, so color is undoubtedly the most influential and appealing factor. It should be matched with different colors according to different themes. Color has the function of conveying information and expressing emotions. Mining the expression of advertising color, design a strong visual effect, with new works, clever use of color, eye-catching, harmonious, beautiful screen effect, so that the advertising more colorful, more can move the audience. Based on this, this paper discusses the mutual development of color design and application in new media design.", "fno": "485600a187", "keywords": [ "Advertising Data Processing", "Data Mining", "Data Visualisation", "Human Factors", "Color Design", "New Media Design", "Advertising Design", "Advertising Color Expression Mining", "Psychology", "Color", "Media", "Visual Effects", "Advertising", "Convergence", "New Media Design", "Color Application", "Color Design", "Mutual Development" ], "authors": [ { "affiliation": "Quanzhou Arts And Crafts Vocational College,School of Fujian,Fujian,China,362500", "fullName": "Lin Bizhu", "givenName": "Lin", "surname": "Bizhu", "__typename": "ArticleAuthorType" } ], "idPrefix": "ctmcd", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-04-01T00:00:00", "pubType": "proceedings", "pages": "187-191", "year": "2021", "issn": null, "isbn": "978-1-6654-4856-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "485600a183", "articleId": "1uOulOdxbW0", "__typename": "AdjacentArticleType" }, "next": { "fno": "485600a192", "articleId": "1uOunj8kiLm", "__typename": "AdjacentArticleType" }, "__typename": 
"AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icis/2013/0174/0/06607815", "title": "Analysis of impression received from reverse perspective illusion to create innovative advertising", "doi": null, "abstractUrl": "/proceedings-article/icis/2013/06607815/12OmNB8kHXD", "parentPublication": { "id": "proceedings/icis/2013/0174/0", "title": "2013 IEEE/ACIS 12th International Conference on Computer and Information Science (ICIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciii/2010/4279/1/4279a299", "title": "Based on the Investigation of the Audience in China's Network Advertising Analysis and Development Proposals", "doi": null, "abstractUrl": "/proceedings-article/iciii/2010/4279a299/12OmNC9lEG8", "parentPublication": { "id": "proceedings/iciii/2010/4279/1", "title": "International Conference on Information Management, Innovation Management and Industrial Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicic/2007/2882/0/28820058", "title": "A Media-Art Employing Virtual Shadows with Shape Recognition", "doi": null, "abstractUrl": "/proceedings-article/icicic/2007/28820058/12OmNvSKNTr", "parentPublication": { "id": "proceedings/icicic/2007/2882/0", "title": "2007 Second International Conference on Innovative Computing, Information and Control", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2012/4771/0/4771a062", "title": "Visual Color Design", "doi": null, "abstractUrl": "/proceedings-article/iv/2012/4771a062/12OmNwCaCqJ", "parentPublication": { "id": "proceedings/iv/2012/4771/0", "title": "2012 16th International Conference on Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2015/7367/0/7367d405", "title": "What Drives Consumers to Click on Social Media Ads? 
The Roles of Content, Media, and Individual Factors", "doi": null, "abstractUrl": "/proceedings-article/hicss/2015/7367d405/12OmNzTH0Ti", "parentPublication": { "id": "proceedings/hicss/2015/7367/0", "title": "2015 48th Hawaii International Conference on System Sciences (HICSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2016/1611/0/07822628", "title": "Inferring Social Influence of anti-Tobacco mass media campaigns", "doi": null, "abstractUrl": "/proceedings-article/bibm/2016/07822628/12OmNzayNBQ", "parentPublication": { "id": "proceedings/bibm/2016/1611/0", "title": "2016 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2013/02/mcg2013020038", "title": "Beyond information and utility: Transforming public spaces with media facades", "doi": null, "abstractUrl": "/magazine/cg/2013/02/mcg2013020038/13rRUzp02qw", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icnc/2020/4905/0/09049707", "title": "The Human Visual System Based Color QR Codes", "doi": null, "abstractUrl": "/proceedings-article/icnc/2020/09049707/1iERV3HYKBy", "parentPublication": { "id": "proceedings/icnc/2020/4905/0", "title": "2020 International Conference on Computing, Networking and Communications (ICNC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciddt/2020/0367/0/036700a312", "title": "Application of Color in Innovative Digital Landscape Design", "doi": null, "abstractUrl": "/proceedings-article/iciddt/2020/036700a312/1wutBO1YjFm", "parentPublication": { "id": "proceedings/iciddt/2020/0367/0", "title": "2020 International Conference on Innovation Design and Digital Technology (ICIDDT)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmsse/2021/2565/0/256500a196", "title": "A Literature Review of humor advertising in social media: Based on CiteSpace", "doi": null, "abstractUrl": "/proceedings-article/icmsse/2021/256500a196/1yNiWrk3oXe", "parentPublication": { "id": "proceedings/icmsse/2021/2565/0", "title": "2021 International Conference on Management Science and Software Engineering (ICMSSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1uqGdWlamUo", "title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)", "acronym": "wacv", "groupId": "1000040", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1uqGuMZQdxu", "doi": "10.1109/WACV48630.2021.00082", "title": "A Learning-Based Approach to Parametric Rotoscoping of Multi-Shape Systems", "normalizedTitle": "A Learning-Based Approach to Parametric Rotoscoping of Multi-Shape Systems", "abstract": "Rotoscoping of facial features is often an integral part of Visual Effects post-production, where the parametric contours created by artists need to be highly detailed, consist of multiple interacting components, and involve significant manual supervision. Yet those assets are usually dis-carded after compositing and hardly reused. In this paper, we present the first methodology to learn from these assets. With only a few manually rotoscoped shots, we identify and extract semantically consistent and task specific landmark points and re-vectorize the roto shapes based on these land-marks. We then train two separate models &#x2013; one to predict landmarks based on a rough crop of the face region, and the other to predict the roto shapes using only the inferred landmarks from the first model. In preliminary production testing, 26% of shots rotoscoped using our tool were able to be used with no adjustment, and another 47% were able to be used with minor adjustments. This represents a significant time savings for the studio, as artists are able to rotoscope almost 73% of their shots with no manual roto-scoping and some spline adjustment. 
This paper presents a novel application of machine learning to professional interactive rotoscoping, a methodology to convert unstructured roto shapes into a self-annotated, trainable dataset that can be harnessed to make accurate predictions on future shots of a similar object, and a limited dataset of rotoscoped multi-shape fine feature systems from a real film production.", "abstracts": [ { "abstractType": "Regular", "content": "Rotoscoping of facial features is often an integral part of Visual Effects post-production, where the parametric contours created by artists need to be highly detailed, consist of multiple interacting components, and involve significant manual supervision. Yet those assets are usually dis-carded after compositing and hardly reused. In this paper, we present the first methodology to learn from these assets. With only a few manually rotoscoped shots, we identify and extract semantically consistent and task specific landmark points and re-vectorize the roto shapes based on these land-marks. We then train two separate models &#x2013; one to predict landmarks based on a rough crop of the face region, and the other to predict the roto shapes using only the inferred landmarks from the first model. In preliminary production testing, 26% of shots rotoscoped using our tool were able to be used with no adjustment, and another 47% were able to be used with minor adjustments. This represents a significant time savings for the studio, as artists are able to rotoscope almost 73% of their shots with no manual roto-scoping and some spline adjustment. 
This paper presents a novel application of machine learning to professional interactive rotoscoping, a methodology to convert unstructured roto shapes into a self-annotated, trainable dataset that can be harnessed to make accurate predictions on future shots of a similar object, and a limited dataset of rotoscoped multi-shape fine feature systems from a real film production.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Rotoscoping of facial features is often an integral part of Visual Effects post-production, where the parametric contours created by artists need to be highly detailed, consist of multiple interacting components, and involve significant manual supervision. Yet those assets are usually dis-carded after compositing and hardly reused. In this paper, we present the first methodology to learn from these assets. With only a few manually rotoscoped shots, we identify and extract semantically consistent and task specific landmark points and re-vectorize the roto shapes based on these land-marks. We then train two separate models – one to predict landmarks based on a rough crop of the face region, and the other to predict the roto shapes using only the inferred landmarks from the first model. In preliminary production testing, 26% of shots rotoscoped using our tool were able to be used with no adjustment, and another 47% were able to be used with minor adjustments. This represents a significant time savings for the studio, as artists are able to rotoscope almost 73% of their shots with no manual roto-scoping and some spline adjustment. 
This paper presents a novel application of machine learning to professional interactive rotoscoping, a methodology to convert unstructured roto shapes into a self-annotated, trainable dataset that can be harnessed to make accurate predictions on future shots of a similar object, and a limited dataset of rotoscoped multi-shape fine feature systems from a real film production.", "fno": "047700a776", "keywords": [ "Computer Animation", "Face Recognition", "Feature Extraction", "Image Motion Analysis", "Image Segmentation", "Interactive Systems", "Learning Artificial Intelligence", "Splines Mathematics", "Video Signal Processing", "Learning Based Approach", "Parametric Rotoscoping", "Multishape Systems", "Facial Features", "Visual Effects Post Production", "Parametric Contours", "Artists", "Multiple Interacting Components", "Significant Manual Supervision", "Manually Rotoscoped Shots", "Semantically Consistent Task Specific Landmark Points", "Land Marks", "Separate Models", "Rough Crop", "Face Region", "Inferred Landmarks", "Preliminary Production Testing", "Significant Time Savings", "Manual Roto Scoping", "Spline Adjustment", "Professional Interactive Rotoscoping", "Unstructured Roto Shapes", "Future Shots", "Rotoscoped Multishape Fine Feature Systems", "Film Production", "Shape", "Production", "Manuals", "Machine Learning", "Predictive Models", "Tools", "Visual Effects" ], "authors": [ { "affiliation": "Intel Corporation", "fullName": "Luis Bermudez", "givenName": "Luis", "surname": "Bermudez", "__typename": "ArticleAuthorType" }, { "affiliation": "Intel Corporation", "fullName": "Nadine Dabby", "givenName": "Nadine", "surname": "Dabby", "__typename": "ArticleAuthorType" }, { "affiliation": "Intel Corporation", "fullName": "Yingxi Adelle Lin", "givenName": "Yingxi Adelle", "surname": "Lin", "__typename": "ArticleAuthorType" }, { "affiliation": "Intel Corporation", "fullName": "Sara Hilmarsdottir", "givenName": "Sara", "surname": "Hilmarsdottir", "__typename": 
"ArticleAuthorType" }, { "affiliation": "Intel Corporation", "fullName": "Narayan Sundararajan", "givenName": "Narayan", "surname": "Sundararajan", "__typename": "ArticleAuthorType" }, { "affiliation": "Intel Corporation", "fullName": "Swarnendu Kar", "givenName": "Swarnendu", "surname": "Kar", "__typename": "ArticleAuthorType" } ], "idPrefix": "wacv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-01-01T00:00:00", "pubType": "proceedings", "pages": "776-785", "year": "2021", "issn": null, "isbn": "978-1-6654-0477-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "047700a766", "articleId": "1uqGyfAAyju", "__typename": "AdjacentArticleType" }, "next": { "fno": "047700a786", "articleId": "1uqGvLJqR5m", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvprw/2009/3994/0/05204053", "title": "Bicycle chain shape models", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2009/05204053/12OmNBQ2VZI", "parentPublication": { "id": "proceedings/cvprw/2009/3994/0", "title": "2009 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2017/0733/0/0733a717", "title": "Learning Shape Trends: Parameter Estimation in Diffusions on Shape Manifolds", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2017/0733a717/12OmNs59K08", "parentPublication": { "id": "proceedings/cvprw/2017/0733/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109c656", "title": "Non-parametric 3D Shape Warping", "doi": null, "abstractUrl": 
"/proceedings-article/icpr/2010/4109c656/12OmNx3HI2d", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460639", "title": "Placing landmarks suitably for shape analysis by optimization", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460639/12OmNy3RRxa", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2010/04/ttp2010040579", "title": "Nonstationary Shape Activities: Dynamic Models for Landmark Shape Change and Applications", "doi": null, "abstractUrl": "/journal/tp/2010/04/ttp2010040579/13rRUNvgzjx", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/07/07930445", "title": "Globally Consistent Wrinkle-Aware Shading of Line Drawings", "doi": null, "abstractUrl": "/journal/tg/2018/07/07930445/13rRUwbs2gy", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2020/08/08666808", "title": "ROAM: A Rich Object Appearance Model with Application to Rotoscoping", "doi": null, "abstractUrl": "/journal/tp/2020/08/08666808/18mLzzeeRsk", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600m2819", "title": "GIFS: Neural Implicit Function for General Shape Representation", "doi": null, 
"abstractUrl": "/proceedings-article/cvpr/2022/694600m2819/1H0KHzfm1Ta", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800a558", "title": "Deep Parametric Shape Predictions Using Distance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800a558/1m3o4bZBl1C", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icectt/2020/9928/0/992800a203", "title": "The Conversion of the Production Mode of Film Green Screen Visual Effects in the Setting of 5G Technology", "doi": null, "abstractUrl": "/proceedings-article/icectt/2020/992800a203/1oa5iiXGv1C", "parentPublication": { "id": "proceedings/icectt/2020/9928/0", "title": "2020 5th International Conference on Electromechanical Control Technology and Transportation (ICECTT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1wutzGkF9Zu", "title": "2020 International Conference on Innovation Design and Digital Technology (ICIDDT)", "acronym": "iciddt", "groupId": "1841164", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1wutKu3uYr6", "doi": "10.1109/ICIDDT52279.2020.00014", "title": "Research and Practice of Traditional Handicraft Interactive Learning Mode Based on User Interaction Behavior", "normalizedTitle": "Research and Practice of Traditional Handicraft Interactive Learning Mode Based on User Interaction Behavior", "abstract": "After investigation and analysis of traditional handicraft digital applications, it is found that most applications pay more attention to digital content presentation and visual effects, and lack of practice and research on the display methods and interactive behavior design of traditional handicraft content with practical characteristics. Under the guidance of behavioral and psychological theories, follow the principle that the design should be consistent with human cognition and behavior habits, and combine the core elements in interactive behaviors to build and construct a traditional handicraft interaction mode that is consistent with user behavior logic. This article combines the five elements of interactive behavior (people, purpose, action, tool / media, and scene) with the traditional handicrafts and inheritance content, redefining participants, positioning behavioral motivation, planning behavioral processes, seeking new methods, and creating new scenarios to reconstruct interactive behaviors and build new models for interactive learning experiences with traditional handicrafts. Finally, take Cantonese Porcelain as an example to design the interactive learning mode. 
Then, through User interviews and experience evaluations, learn more about users' feelings and expectations for this model.", "abstracts": [ { "abstractType": "Regular", "content": "After investigation and analysis of traditional handicraft digital applications, it is found that most applications pay more attention to digital content presentation and visual effects, and lack of practice and research on the display methods and interactive behavior design of traditional handicraft content with practical characteristics. Under the guidance of behavioral and psychological theories, follow the principle that the design should be consistent with human cognition and behavior habits, and combine the core elements in interactive behaviors to build and construct a traditional handicraft interaction mode that is consistent with user behavior logic. This article combines the five elements of interactive behavior (people, purpose, action, tool / media, and scene) with the traditional handicrafts and inheritance content, redefining participants, positioning behavioral motivation, planning behavioral processes, seeking new methods, and creating new scenarios to reconstruct interactive behaviors and build new models for interactive learning experiences with traditional handicrafts. Finally, take Cantonese Porcelain as an example to design the interactive learning mode. Then, through User interviews and experience evaluations, learn more about users' feelings and expectations for this model.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "After investigation and analysis of traditional handicraft digital applications, it is found that most applications pay more attention to digital content presentation and visual effects, and lack of practice and research on the display methods and interactive behavior design of traditional handicraft content with practical characteristics. 
Under the guidance of behavioral and psychological theories, follow the principle that the design should be consistent with human cognition and behavior habits, and combine the core elements in interactive behaviors to build and construct a traditional handicraft interaction mode that is consistent with user behavior logic. This article combines the five elements of interactive behavior (people, purpose, action, tool / media, and scene) with the traditional handicrafts and inheritance content, redefining participants, positioning behavioral motivation, planning behavioral processes, seeking new methods, and creating new scenarios to reconstruct interactive behaviors and build new models for interactive learning experiences with traditional handicrafts. Finally, take Cantonese Porcelain as an example to design the interactive learning mode. Then, through User interviews and experience evaluations, learn more about users' feelings and expectations for this model.", "fno": "036700a039", "keywords": [ "Cognition", "Computer Aided Instruction", "Human Factors", "Interactive Systems", "Psychology", "User Interfaces", "Human Cognition", "Behavior Habits", "Traditional Handicraft Interaction Mode", "User Behavior Logic", "Behavioral Motivation", "Behavioral Processes", "User Interaction Behavior", "Traditional Handicraft Digital Applications", "Digital Content Presentation", "Interactive Behavior Design", "Handicraft Content", "Traditional Handicraft Interactive Learning Mode", "Cantonese Porcelain", "Technological Innovation", "Porcelain", "Psychology", "Manuals", "Tools", "Media", "Visual Effects", "Interactive Behavior", "Web AR", "Traditional Handicraft", "Intangible Cultural Heritage" ], "authors": [ { "affiliation": "Guangdong University of Technology,School of Design Arts,Guangdong,China", "fullName": "Yutong Liu", "givenName": "Yutong", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": "Guangdong University of Technology,School of Design 
Arts,Guangdong,China", "fullName": "Yi Ji", "givenName": "Yi", "surname": "Ji", "__typename": "ArticleAuthorType" }, { "affiliation": "Guangdong University of Technology,School of Design Arts,Guangdong,China", "fullName": "Yuan Wei", "givenName": "Yuan", "surname": "Wei", "__typename": "ArticleAuthorType" }, { "affiliation": "Guangdong University of Technology,School of Design Arts,Guangdong,China", "fullName": "Sean Clark", "givenName": "Sean", "surname": "Clark", "__typename": "ArticleAuthorType" }, { "affiliation": "Jieyang College of Technology,School of Design Arts,Jieyang,China", "fullName": "Dongjin Lin", "givenName": "Dongjin", "surname": "Lin", "__typename": "ArticleAuthorType" } ], "idPrefix": "iciddt", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-12-01T00:00:00", "pubType": "proceedings", "pages": "39-43", "year": "2020", "issn": null, "isbn": "978-1-6654-0367-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "036700a032", "articleId": "1wutDoaVl2o", "__typename": "AdjacentArticleType" }, "next": { "fno": "036700a044", "articleId": "1wutJQ2leta", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/issre/2008/3405/0/3405a301", "title": "An Analysis Framework for Inter-system Interaction Behavior", "doi": null, "abstractUrl": "/proceedings-article/issre/2008/3405a301/12OmNvT2p4c", "parentPublication": { "id": "proceedings/issre/2008/3405/0", "title": "2008 19th International Symposium on Software Reliability Engineering (ISSRE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2013/5545/0/06553789", "title": "Automatic behavior descriptors for psychological disorder analysis", "doi": null, "abstractUrl": "/proceedings-article/fg/2013/06553789/12OmNwMob8L", "parentPublication": { "id": 
"proceedings/fg/2013/5545/0", "title": "2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/culture-computing/2013/5047/0/5047a165", "title": "Learning from Traditional Dynamic Arts: Elements for Interaction Design", "doi": null, "abstractUrl": "/proceedings-article/culture-computing/2013/5047a165/12OmNx76TEp", "parentPublication": { "id": "proceedings/culture-computing/2013/5047/0", "title": "2013 International Conference on Culture and Computing (Culture Computing)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/pc/2013/03/mpc2013030066", "title": "Smartphones for Large-Scale Behavior Change Interventions", "doi": null, "abstractUrl": "/magazine/pc/2013/03/mpc2013030066/13rRUwI5U06", "parentPublication": { "id": "mags/pc", "title": "IEEE Pervasive Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/12/ttg2012122719", "title": "Interaction Support for Visual Comparison Inspired by Natural Behavior", "doi": null, "abstractUrl": "/journal/tg/2012/12/ttg2012122719/13rRUxZRbo0", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2021/3176/0/09666969", "title": "Beyond the Words: Analysis and Detection of Self-Disclosure Behavior during Robot Positive Psychology Interaction", "doi": null, "abstractUrl": "/proceedings-article/fg/2021/09666969/1A6BnZefN4Y", "parentPublication": { "id": "proceedings/fg/2021/3176/0", "title": "2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icstw/2020/1075/0/09155736", "title": 
"Selenium based Testing Systems for Analytical Data Generation of Website User Behavior", "doi": null, "abstractUrl": "/proceedings-article/icstw/2020/09155736/1m1jlsf4MBW", "parentPublication": { "id": "proceedings/icstw/2020/1075/0", "title": "2020 IEEE International Conference on Software Testing, Verification and Validation Workshops (ICSTW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciddt/2020/0367/0/036700a286", "title": "Application of online trading market in rural handicraft protection design strategy : &#x2014;&#x2014;Take \"Fang yuan\" app handicraft exchange trading platform as an example", "doi": null, "abstractUrl": "/proceedings-article/iciddt/2020/036700a286/1wutDbgaV6o", "parentPublication": { "id": "proceedings/iciddt/2020/0367/0", "title": "2020 International Conference on Innovation Design and Digital Technology (ICIDDT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciddt/2020/0367/0/036700a553", "title": "Research on Personalized Recommendation System of Traditional Handicraft Based on Semantic Ontology", "doi": null, "abstractUrl": "/proceedings-article/iciddt/2020/036700a553/1wutGezcGu4", "parentPublication": { "id": "proceedings/iciddt/2020/0367/0", "title": "2020 International Conference on Innovation Design and Digital Technology (ICIDDT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciddt/2020/0367/0/036700a537", "title": "A Framework of Traditional Handicraft Creative Practice towards Human-Engaged Computing", "doi": null, "abstractUrl": "/proceedings-article/iciddt/2020/036700a537/1wutHmjHv8Y", "parentPublication": { "id": "proceedings/iciddt/2020/0367/0", "title": "2020 International Conference on Innovation Design and Digital Technology (ICIDDT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1yeHGyRsuys", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1yeK50WP42I", "doi": "10.1109/CVPR46437.2021.01188", "title": "PQA: Perceptual Question Answering", "normalizedTitle": "PQA: Perceptual Question Answering", "abstract": "Perceptual organization remains one of the very few established theories on the human visual system. It underpinned many pre-deep seminal works on segmentation and detection, yet research has seen a rapid decline since the preferential shift to learning deep models. Of the limited attempts, most aimed at interpreting complex visual scenes using perceptual organizational rules. This has however been proven to be sub-optimal, since models were unable to effectively capture the visual complexity in real-world imagery. In this paper, we rejuvenate the study of perceptual organization, by advocating two positional changes: (i) we examine purposefully generated synthetic data, instead of complex real imagery, and (ii) we ask machines to synthesize novel perceptually-valid patterns, instead of explaining existing data. Our overall answer lies with the introduction of a novel visual challenge &#x2013; the challenge of perceptual question answering (PQA). Upon observing example perceptual question-answer pairs, the goal for PQA is to solve similar questions by generating answers entirely from scratch (see Figure 1). Our first contribution is therefore the first dataset of perceptual question-answer pairs, each generated specifically for a particular Gestalt principle. We then borrow insights from human psychology to design an agent that casts perceptual organization as a self-attention problem, where a proposed grid-to-grid mapping network directly generates answer patterns from scratch. 
Experiments show our agent to outperform a selection of naive and strong baselines. A human study however indicates that ours uses astronomically more data to learn when compared to an average human, necessitating future research (with or without our dataset).", "abstracts": [ { "abstractType": "Regular", "content": "Perceptual organization remains one of the very few established theories on the human visual system. It underpinned many pre-deep seminal works on segmentation and detection, yet research has seen a rapid decline since the preferential shift to learning deep models. Of the limited attempts, most aimed at interpreting complex visual scenes using perceptual organizational rules. This has however been proven to be sub-optimal, since models were unable to effectively capture the visual complexity in real-world imagery. In this paper, we rejuvenate the study of perceptual organization, by advocating two positional changes: (i) we examine purposefully generated synthetic data, instead of complex real imagery, and (ii) we ask machines to synthesize novel perceptually-valid patterns, instead of explaining existing data. Our overall answer lies with the introduction of a novel visual challenge &#x2013; the challenge of perceptual question answering (PQA). Upon observing example perceptual question-answer pairs, the goal for PQA is to solve similar questions by generating answers entirely from scratch (see Figure 1). Our first contribution is therefore the first dataset of perceptual question-answer pairs, each generated specifically for a particular Gestalt principle. We then borrow insights from human psychology to design an agent that casts perceptual organization as a self-attention problem, where a proposed grid-to-grid mapping network directly generates answer patterns from scratch. Experiments show our agent to outperform a selection of naive and strong baselines. 
A human study however indicates that ours uses astronomically more data to learn when compared to an average human, necessitating future research (with or without our dataset).", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Perceptual organization remains one of the very few established theories on the human visual system. It underpinned many pre-deep seminal works on segmentation and detection, yet research has seen a rapid decline since the preferential shift to learning deep models. Of the limited attempts, most aimed at interpreting complex visual scenes using perceptual organizational rules. This has however been proven to be sub-optimal, since models were unable to effectively capture the visual complexity in real-world imagery. In this paper, we rejuvenate the study of perceptual organization, by advocating two positional changes: (i) we examine purposefully generated synthetic data, instead of complex real imagery, and (ii) we ask machines to synthesize novel perceptually-valid patterns, instead of explaining existing data. Our overall answer lies with the introduction of a novel visual challenge – the challenge of perceptual question answering (PQA). Upon observing example perceptual question-answer pairs, the goal for PQA is to solve similar questions by generating answers entirely from scratch (see Figure 1). Our first contribution is therefore the first dataset of perceptual question-answer pairs, each generated specifically for a particular Gestalt principle. We then borrow insights from human psychology to design an agent that casts perceptual organization as a self-attention problem, where a proposed grid-to-grid mapping network directly generates answer patterns from scratch. Experiments show our agent to outperform a selection of naive and strong baselines. 
A human study however indicates that ours uses astronomically more data to learn when compared to an average human, necessitating future research (with or without our dataset).", "fno": "450900m2051", "keywords": [ "Image Segmentation", "Learning Artificial Intelligence", "Question Answering Information Retrieval", "Visual Perception", "PQA", "Human Visual System", "Pre Deep Seminal Works", "Complex Visual Scenes", "Perceptual Organizational Rules", "Visual Complexity", "Complex Real Imagery", "Perceptual Question Answer Pairs", "Gestalt Principle", "Grid To Grid Mapping Network", "Visualization", "Training Data", "Psychology", "Organizations", "Visual Systems", "Knowledge Discovery", "Data Models" ], "authors": [ { "affiliation": "Beijing University of Posts and Telecommunications,CN", "fullName": "Yonggang Qi", "givenName": "Yonggang", "surname": "Qi", "__typename": "ArticleAuthorType" }, { "affiliation": "Beijing University of Posts and Telecommunications,CN", "fullName": "Kai Zhang", "givenName": "Kai", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Surrey,SketchX, Cvssp,UK", "fullName": "Aneeshan Sain", "givenName": "Aneeshan", "surname": "Sain", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Surrey,SketchX, Cvssp,UK", "fullName": "Yi-Zhe Song", "givenName": "Yi-Zhe", "surname": "Song", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-06-01T00:00:00", "pubType": "proceedings", "pages": "12051-12059", "year": "2021", "issn": null, "isbn": "978-1-6654-4509-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1yeK4XHhRGU", "name": "pcvpr202145090-09578331s1-mm_450900m2051.zip", "size": "1.58 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202145090-09578331s1-mm_450900m2051.zip", "__typename": "WebExtraType" } ], 
"adjacentArticles": { "previous": { "fno": "450900m2040", "articleId": "1yeJFZa3Kne", "__typename": "AdjacentArticleType" }, "next": { "fno": "450900m2060", "articleId": "1yeJlFJ00Du", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2016/8851/0/8851e995", "title": "Visual7W: Grounded Question Answering in Images", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2016/8851e995/12OmNBLvlL9", "parentPublication": { "id": "proceedings/cvpr/2016/8851/0", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/08099929", "title": "Multi-level Attention Networks for Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/08099929/12OmNBSjIWf", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391c425", "title": "VQA: Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391c425/12OmNrYlmBL", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mipr/2018/1857/0/185701a170", "title": "Affective Visual Question Answering Network", "doi": null, "abstractUrl": "/proceedings-article/mipr/2018/185701a170/12OmNxFaLwD", "parentPublication": { "id": "proceedings/mipr/2018/1857/0", "title": "2018 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2018/10/08046084", "title": "FVQA: 
Fact-Based Visual Question Answering", "doi": null, "abstractUrl": "/journal/tp/2018/10/08046084/13rRUwInvC9", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000a001", "title": "Embodied Question Answering", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000a001/17D45WZZ7Ct", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000g135", "title": "Focal Visual-Text Attention for Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000g135/17D45XdBRQV", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2022/8045/0/10020633", "title": "Perceptual-IQ: Visual Commonsense Reasoning about Perceptual Imagination", "doi": null, "abstractUrl": "/proceedings-article/big-data/2022/10020633/1KfSnsX79FS", "parentPublication": { "id": "proceedings/big-data/2022/8045/0", "title": "2022 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2020/1331/0/09102814", "title": "Rankvqa: Answer Re-Ranking For Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/icme/2020/09102814/1kwqOowCTII", "parentPublication": { "id": "proceedings/icme/2020/1331/0", "title": "2020 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { 
"id": "proceedings/icpr/2021/8808/0/09413078", "title": "Answer-checking in Context: A Multi-modal Fully Attention Network for Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/icpr/2021/09413078/1tmjXl2iNgI", "parentPublication": { "id": "proceedings/icpr/2021/8808/0", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNvoWV3D", "title": "The 9th Annual IEEE Symposium on Field-Programmable Custom Computing Machines", "acronym": "fpgm", "groupId": "1000289", "volume": "0", "displayVolume": "0", "year": "2001", "__typename": "ProceedingType" }, "article": { "id": "12OmNAq3hO7", "doi": "", "title": "Reconfigurable Designs for Ray Tracing", "normalizedTitle": "Reconfigurable Designs for Ray Tracing", "abstract": "We describe a feasibility study into using reconfigurable hardware for real-time ray tracing. The study includes mapping time-consuming parts of the algorithm into hardware, and transforming the algorithm following a breadth-first approach to improve system performance when the host bus is slow. We also examine the application of runtime reconfiguration, and estimate the reconfigurable resources required for animating complex scenes.", "abstracts": [ { "abstractType": "Regular", "content": "We describe a feasibility study into using reconfigurable hardware for real-time ray tracing. The study includes mapping time-consuming parts of the algorithm into hardware, and transforming the algorithm following a breadth-first approach to improve system performance when the host bus is slow. We also examine the application of runtime reconfiguration, and estimate the reconfigurable resources required for animating complex scenes.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We describe a feasibility study into using reconfigurable hardware for real-time ray tracing. The study includes mapping time-consuming parts of the algorithm into hardware, and transforming the algorithm following a breadth-first approach to improve system performance when the host bus is slow. 
We also examine the application of runtime reconfiguration, and estimate the reconfigurable resources required for animating complex scenes.", "fno": "01420938", "keywords": [ "Ray Tracing", "Hardware", "Layout", "Testing", "Computer Graphics", "Runtime", "Animation", "Rendering Computer Graphics", "Costs", "Educational Institutions" ], "authors": [ { "affiliation": "Imperial College", "fullName": "T. Todman", "givenName": "T.", "surname": "Todman", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "W. Luk", "givenName": "W.", "surname": "Luk", "__typename": "ArticleAuthorType" } ], "idPrefix": "fpgm", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2001-01-01T00:00:00", "pubType": "proceedings", "pages": "300,301", "year": "2001", "issn": null, "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "01420937", "articleId": "12OmNqyUUud", "__typename": "AdjacentArticleType" }, "next": { "fno": "01420939", "articleId": "12OmNvlxJyW", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/rt/2008/2741/0/04634641", "title": "A straightforward CUDA implementation for interactive ray-tracing", "doi": null, "abstractUrl": "/proceedings-article/rt/2008/04634641/12OmNAY79ml", "parentPublication": { "id": "proceedings/rt/2008/2741/0", "title": "Symposium on Interactive Ray Tracing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rt/2006/0693/0/04061539", "title": "Ray Tracing for the Movie `Cars'", "doi": null, "abstractUrl": "/proceedings-article/rt/2006/04061539/12OmNBBzoiL", "parentPublication": { "id": "proceedings/rt/2006/0693/0", "title": "IEEE Symposium on Interactive Ray Tracing 2006", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/date/2003/1870/1/01253820", "title": "Interactive ray tracing on reconfigurable SIMD morphosys", "doi": null, "abstractUrl": "/proceedings-article/date/2003/01253820/12OmNBOllgC", "parentPublication": { "id": "proceedings/date/2003/1870/1", "title": "Design, Automation &amp; Test in Europe Conference &amp; Exhibition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2013/5099/0/5099a258", "title": "Dynamic Per Object Ray Caching Textures for Real-Time Ray Tracing", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2013/5099a258/12OmNCfAPL8", "parentPublication": { "id": "proceedings/sibgrapi/2013/5099/0", "title": "2013 XXVI Conference on Graphics, Patterns and Images", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/date/2003/1870/0/01253820", "title": "Interactive ray tracing on reconfigurable SIMD morphosys", "doi": null, "abstractUrl": "/proceedings-article/date/2003/01253820/12OmNqJq4q4", "parentPublication": { "id": "proceedings/date/2003/1870/0", "title": "Design, Automation &amp; Test in Europe Conference &amp; Exhibition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/date/2003/1870/2/01253820", "title": "Interactive ray tracing on reconfigurable SIMD morphosys", "doi": null, "abstractUrl": "/proceedings-article/date/2003/01253820/12OmNxvwoNh", "parentPublication": { "id": "proceedings/date/2003/1870/2", "title": "Design, Automation &amp; Test in Europe Conference &amp; Exhibition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rt/2008/2741/0/04634622", "title": "Coherent ray tracing via stream filtering", "doi": null, "abstractUrl": "/proceedings-article/rt/2008/04634622/12OmNxxdZDN", "parentPublication": { "id": "proceedings/rt/2008/2741/0", "title": "Symposium on Interactive Ray Tracing", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/1997/04/v0316", "title": "Breadth-First Ray Tracing Utilizing Uniform Spatial Subdivision", "doi": null, "abstractUrl": "/journal/tg/1997/04/v0316/13rRUxBa5ne", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcc-dss-smartcity-dependsys/2022/1993/0/199300a043", "title": "RTA: an Efficient SIMD Architecture for Ray Tracing", "doi": null, "abstractUrl": "/proceedings-article/hpcc-dss-smartcity-dependsys/2022/199300a043/1LSPd946Y1y", "parentPublication": { "id": "proceedings/hpcc-dss-smartcity-dependsys/2022/1993/0", "title": "2022 IEEE 24th Int Conf on High Performance Computing & Communications; 8th Int Conf on Data Science & Systems; 20th Int Conf on Smart City; 8th Int Conf on Dependability in Sensor, Cloud & Big Data Systems & Application (HPCC/DSS/SmartCity/DependSys)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzcxZeM", "title": "2014 20th IEEE International Conference on Parallel and Distributed Systems (ICPADS)", "acronym": "icpads", "groupId": "1000534", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNxdVh0t", "doi": "10.1109/PADSW.2014.7097851", "title": "Toward multi-target autotuning for accelerators", "normalizedTitle": "Toward multi-target autotuning for accelerators", "abstract": "Producing high-performance implementations from simple, portable computation specifications is a challenge that compilers have tried to address for several decades. More recently, a relatively stable architectural landscape has evolved into a set of increasingly diverging and rapidly changing CPU and accelerator designs, with the main common factor being dramatic increases in the levels of parallelism available. The growth of architectural heterogeneity and parallelism, combined with the very slow development cycles of traditional compilers, has motivated the development of autotuning tools that can quickly respond to changes in architectures and programming models, and enable very specialized optimizations that are not possible or likely to be provided by mainstream compilers. In this paper we describe the new OpenCL code generator and autotuner OrCL and the introduction of detailed performance measurement into the autotuning process. OrCL is implemented within the Orio autotuning framework, which enables the rapid development of experimental languages and code optimization strategies aimed at achieving good performance on new platforms without rewriting or hand-optimizing critical kernels. 
The combination of the new OpenCL autotuning and TAU measurement capabilities enables users to consistently evaluate autotuning effectiveness across a range of architectures, including several NVIDIA and AMD accelerators and Intel Xeon Phi processors, and to compare the OpenCL and CUDA code generation capabilities. We present results of autotuning several numerical kernels that typically dominate the execution time of iterative sparse linear system solution and key computations from a 3-D parallel simulation of solid fuel ignition.", "abstracts": [ { "abstractType": "Regular", "content": "Producing high-performance implementations from simple, portable computation specifications is a challenge that compilers have tried to address for several decades. More recently, a relatively stable architectural landscape has evolved into a set of increasingly diverging and rapidly changing CPU and accelerator designs, with the main common factor being dramatic increases in the levels of parallelism available. The growth of architectural heterogeneity and parallelism, combined with the very slow development cycles of traditional compilers, has motivated the development of autotuning tools that can quickly respond to changes in architectures and programming models, and enable very specialized optimizations that are not possible or likely to be provided by mainstream compilers. In this paper we describe the new OpenCL code generator and autotuner OrCL and the introduction of detailed performance measurement into the autotuning process. OrCL is implemented within the Orio autotuning framework, which enables the rapid development of experimental languages and code optimization strategies aimed at achieving good performance on new platforms without rewriting or hand-optimizing critical kernels. 
The combination of the new OpenCL autotuning and TAU measurement capabilities enables users to consistently evaluate autotuning effectiveness across a range of architectures, including several NVIDIA and AMD accelerators and Intel Xeon Phi processors, and to compare the OpenCL and CUDA code generation capabilities. We present results of autotuning several numerical kernels that typically dominate the execution time of iterative sparse linear system solution and key computations from a 3-D parallel simulation of solid fuel ignition.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Producing high-performance implementations from simple, portable computation specifications is a challenge that compilers have tried to address for several decades. More recently, a relatively stable architectural landscape has evolved into a set of increasingly diverging and rapidly changing CPU and accelerator designs, with the main common factor being dramatic increases in the levels of parallelism available. The growth of architectural heterogeneity and parallelism, combined with the very slow development cycles of traditional compilers, has motivated the development of autotuning tools that can quickly respond to changes in architectures and programming models, and enable very specialized optimizations that are not possible or likely to be provided by mainstream compilers. In this paper we describe the new OpenCL code generator and autotuner OrCL and the introduction of detailed performance measurement into the autotuning process. OrCL is implemented within the Orio autotuning framework, which enables the rapid development of experimental languages and code optimization strategies aimed at achieving good performance on new platforms without rewriting or hand-optimizing critical kernels. 
The combination of the new OpenCL autotuning and TAU measurement capabilities enables users to consistently evaluate autotuning effectiveness across a range of architectures, including several NVIDIA and AMD accelerators and Intel Xeon Phi processors, and to compare the OpenCL and CUDA code generation capabilities. We present results of autotuning several numerical kernels that typically dominate the execution time of iterative sparse linear system solution and key computations from a 3-D parallel simulation of solid fuel ignition.", "fno": "07097851", "keywords": [ "Kernel", "Graphics Processing Units", "Performance Evaluation", "Optimization", "Computer Architecture", "Generators", "Hardware", "Accelerators", "Open CL", "TAU", "Autotuning", "GP Us" ], "authors": [ { "affiliation": "Department of Computer and Information Science, University of Oregon, Eugene, 97403, USA", "fullName": "Nick Chaimov", "givenName": "Nick", "surname": "Chaimov", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Computer and Information Science, University of Oregon, Eugene, 97403, USA", "fullName": "Boyana Norris", "givenName": "Boyana", "surname": "Norris", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Computer and Information Science, University of Oregon, Eugene, 97403, USA", "fullName": "Allen Malony", "givenName": "Allen", "surname": "Malony", "__typename": "ArticleAuthorType" } ], "idPrefix": "icpads", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-12-01T00:00:00", "pubType": "proceedings", "pages": "534-541", "year": "2014", "issn": null, "isbn": "978-1-4799-7615-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07097850", "articleId": "12OmNzlD9fh", "__typename": "AdjacentArticleType" }, "next": { "fno": "07097852", "articleId": "12OmNwc3wB1", "__typename": "AdjacentArticleType" }, "__typename": 
"AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icpp/2017/1042/0/1042a523", "title": "Autotuning GPU Kernels via Static and Predictive Analysis", "doi": null, "abstractUrl": "/proceedings-article/icpp/2017/1042a523/12OmNwM6A6z", "parentPublication": { "id": "proceedings/icpp/2017/1042/0", "title": "2017 46th International Conference on Parallel Processing (ICPP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pact/2015/9524/0/9524a138", "title": "PENCIL: A Platform-Neutral Compute Intermediate Language for Accelerator Programming", "doi": null, "abstractUrl": "/proceedings-article/pact/2015/9524a138/12OmNx6xHkM", "parentPublication": { "id": "proceedings/pact/2015/9524/0", "title": "2015 International Conference on Parallel Architecture and Compilation (PACT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pdp/2014/2729/0/2729a672", "title": "A Portable and High-Performance General Matrix-Multiply (GEMM) Library for GPUs and Single-Chip CPU/GPU Systems", "doi": null, "abstractUrl": "/proceedings-article/pdp/2014/2729a672/12OmNxGj9XV", "parentPublication": { "id": "proceedings/pdp/2014/2729/0", "title": "2014 22nd Euromicro International Conference on Parallel, Distributed and Network-Based Processing (PDP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sbac-pad/2014/6905/0/6905a112", "title": "Leveraging OmpSs to Exploit Hardware Accelerators", "doi": null, "abstractUrl": "/proceedings-article/sbac-pad/2014/6905a112/12OmNyQ7FYd", "parentPublication": { "id": "proceedings/sbac-pad/2014/6905/0", "title": "2014 26th International Symposium on Computer Architecture and High Performance Computing (SBAC-PAD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipdpsw/2015/7684/0/7684a699", "title": "Understanding Performance Portability of 
OpenACC for Supercomputers", "doi": null, "abstractUrl": "/proceedings-article/ipdpsw/2015/7684a699/12OmNyRPgGS", "parentPublication": { "id": "proceedings/ipdpsw/2015/7684/0", "title": "2015 IEEE International Parallel and Distributed Processing Symposium Workshop (IPDPSW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hipc/2014/5976/0/07116910", "title": "Smart multi-task scheduling for OpenCL programs on CPU/GPU heterogeneous platforms", "doi": null, "abstractUrl": "/proceedings-article/hipc/2014/07116910/12OmNyo1nVn", "parentPublication": { "id": "proceedings/hipc/2014/5976/0", "title": "2014 21st International Conference on High Performance Computing (HiPC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2020/06/08843948", "title": "Optimal Kernel Design for Finite-Element Numerical Integration on GPUs", "doi": null, "abstractUrl": "/magazine/cs/2020/06/08843948/1dqspanAyFW", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/p3hpc/2019/6003/0/600300a026", "title": "Performance Portability of Multi-Material Kernels", "doi": null, "abstractUrl": "/proceedings-article/p3hpc/2019/600300a026/1gjRScgPWxO", "parentPublication": { "id": "proceedings/p3hpc/2019/6003/0", "title": "2019 IEEE/ACM International Workshop on Performance, Portability and Productivity in HPC (P3HPC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2019/0858/0/09005555", "title": "A Case Study of k-means Clustering using SYCL", "doi": null, "abstractUrl": "/proceedings-article/big-data/2019/09005555/1hJsais80SI", "parentPublication": { "id": "proceedings/big-data/2019/0858/0", "title": "2019 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/ipdpsw/2020/7445/0/09150427", "title": "Exploring a multi-resolution GPU programming model for Chapel", "doi": null, "abstractUrl": "/proceedings-article/ipdpsw/2020/09150427/1lPGAkuSosE", "parentPublication": { "id": "proceedings/ipdpsw/2020/7445/0", "title": "2020 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBpmDFe", "title": "2016 IEEE International Parallel and Distributed Processing Symposium (IPDPS)", "acronym": "ipdps", "groupId": "1000530", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNxeuta0", "doi": "10.1109/IPDPS.2016.31", "title": "Online-Autotuning of Parallel SAH kD-Trees", "normalizedTitle": "Online-Autotuning of Parallel SAH kD-Trees", "abstract": "We explore the benefits of using online-autotuning to find an optimal configuration for the parallel construction of Surface Area Heuristic (SAH) kD-trees. Using a quickly converging autotuning mechanism, we achieve a significant performance improvement of up to 1.96x. The SAH kD-tree is a spatial data structure and a fundamental tool in the domain of computer graphics and simulations. The parallel construction of these trees is influenced by several parameters, controlling various aspects of the algorithm. However, the parameter configurations advocated in the literature are hardly ever portable. To boost portability, we apply onlineautotuning to four state-of-the-art variants of parallel kD-tree construction. We show that speedups over the variants' standard configurations are possible with low programmer effort. We further demonstrate the performance portability of our approach by evaluating performance on varying multicore platforms and both static and dynamic geometries.", "abstracts": [ { "abstractType": "Regular", "content": "We explore the benefits of using online-autotuning to find an optimal configuration for the parallel construction of Surface Area Heuristic (SAH) kD-trees. Using a quickly converging autotuning mechanism, we achieve a significant performance improvement of up to 1.96x. The SAH kD-tree is a spatial data structure and a fundamental tool in the domain of computer graphics and simulations. 
The parallel construction of these trees is influenced by several parameters, controlling various aspects of the algorithm. However, the parameter configurations advocated in the literature are hardly ever portable. To boost portability, we apply onlineautotuning to four state-of-the-art variants of parallel kD-tree construction. We show that speedups over the variants' standard configurations are possible with low programmer effort. We further demonstrate the performance portability of our approach by evaluating performance on varying multicore platforms and both static and dynamic geometries.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We explore the benefits of using online-autotuning to find an optimal configuration for the parallel construction of Surface Area Heuristic (SAH) kD-trees. Using a quickly converging autotuning mechanism, we achieve a significant performance improvement of up to 1.96x. The SAH kD-tree is a spatial data structure and a fundamental tool in the domain of computer graphics and simulations. The parallel construction of these trees is influenced by several parameters, controlling various aspects of the algorithm. However, the parameter configurations advocated in the literature are hardly ever portable. To boost portability, we apply onlineautotuning to four state-of-the-art variants of parallel kD-tree construction. We show that speedups over the variants' standard configurations are possible with low programmer effort. 
We further demonstrate the performance portability of our approach by evaluating performance on varying multicore platforms and both static and dynamic geometries.", "fno": "2140a628", "keywords": [ "Tuning", "Data Structures", "Ray Tracing", "Geometry", "Hardware", "Parallel Algorithms", "Rendering Computer Graphics", "Parallel Performance Optimization", "Spatial Data Structures", "Online Autotuning" ], "authors": [ { "affiliation": null, "fullName": "Martin Tillmann", "givenName": "Martin", "surname": "Tillmann", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Philip Pfaffe", "givenName": "Philip", "surname": "Pfaffe", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Christopher Kaag", "givenName": "Christopher", "surname": "Kaag", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Walter F. Tichy", "givenName": "Walter F.", "surname": "Tichy", "__typename": "ArticleAuthorType" } ], "idPrefix": "ipdps", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2016-05-01T00:00:00", "pubType": "proceedings", "pages": "628-637", "year": "2016", "issn": "1530-2075", "isbn": "978-1-5090-2140-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "2140a618", "articleId": "12OmNzayNh8", "__typename": "AdjacentArticleType" }, "next": { "fno": "2140a638", "articleId": "12OmNAThXTQ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/rt/2006/0693/0/04061547", "title": "On building fast kd-Trees for Ray Tracing, and on doing that in O(N log N)", "doi": null, "abstractUrl": "/proceedings-article/rt/2006/04061547/12OmNCcKQAt", "parentPublication": { "id": "proceedings/rt/2006/0693/0", "title": "IEEE Symposium on Interactive Ray Tracing 2006", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/rt/2006/0693/0/04061550", "title": "Experiences with Streaming Construction of SAH KD-Trees", "doi": null, "abstractUrl": "/proceedings-article/rt/2006/04061550/12OmNsbY6PY", "parentPublication": { "id": "proceedings/rt/2006/0693/0", "title": "IEEE Symposium on Interactive Ray Tracing 2006", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sbac-pad/2009/3857/0/3857a041", "title": "kD-Tree Traversal Implementations for Ray Tracing on Massive Multiprocessors: A Comparative Study", "doi": null, "abstractUrl": "/proceedings-article/sbac-pad/2009/3857a041/12OmNvJXeAz", "parentPublication": { "id": "proceedings/sbac-pad/2009/3857/0", "title": "Computer Architecture and High Performance Computing, Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipdpsw/2017/3408/0/07965198", "title": "Online-Autotuning in the Presence of Algorithmic Choice", "doi": null, "abstractUrl": "/proceedings-article/ipdpsw/2017/07965198/12OmNxA3YWm", "parentPublication": { "id": "proceedings/ipdpsw/2017/3408/0", "title": "2017 IEEE International Parallel and Distributed Processing Symposium: Workshops (IPDPSW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rt/2006/0693/0/04061546", "title": "Omnidirectional Ray Tracing Traversal Algorithm for kd-trees", "doi": null, "abstractUrl": "/proceedings-article/rt/2006/04061546/12OmNxEBzeX", "parentPublication": { "id": "proceedings/rt/2006/0693/0", "title": "IEEE Symposium on Interactive Ray Tracing 2006", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csse/2008/3336/2/3336d120", "title": "Ray Tracing Dynamic Scenes using fast KD-tree Base on Multi-Core Architectures", "doi": null, "abstractUrl": "/proceedings-article/csse/2008/3336d120/12OmNyL0Twy", "parentPublication": { "id": 
"proceedings/csse/2008/3336/6", "title": "Computer Science and Software Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rt/2006/0693/0/04061549", "title": "Fast kd-tree Construction with an Adaptive Error-Bounded Heuristic", "doi": null, "abstractUrl": "/proceedings-article/rt/2006/04061549/12OmNzXFoKl", "parentPublication": { "id": "proceedings/rt/2006/0693/0", "title": "IEEE Symposium on Interactive Ray Tracing 2006", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dmdcm/2011/4413/0/4413a185", "title": "SAH Based KD Tree Construction on Hybrid Architecture", "doi": null, "abstractUrl": "/proceedings-article/dmdcm/2011/4413a185/12OmNzvhvDe", "parentPublication": { "id": "proceedings/dmdcm/2011/4413/0", "title": "Digital Media and Digital Content Management, Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cecit/2021/3757/0/375700b168", "title": "Stackless KD-Tree Traversal For Ray Tracing", "doi": null, "abstractUrl": "/proceedings-article/cecit/2021/375700b168/1CdEOBZgTVC", "parentPublication": { "id": "proceedings/cecit/2021/3757/0", "title": "2021 2nd International Conference on Electronics, Communications and Information Technology (CECIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/02/09541074", "title": "Analysis of Acceleration Structure Parameters and Hybrid Autotuning for Ray Tracing", "doi": null, "abstractUrl": "/journal/tg/2023/02/09541074/1x3fUMmATi8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1AIMU2L3vkk", "title": "2021 IEEE 14th International Symposium on Embedded Multicore/Many-core Systems-on-Chip (MCSoC)", "acronym": "mcsoc", "groupId": "1801959", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1AIN1nVHnaw", "doi": "10.1109/MCSoC51149.2021.00044", "title": "Enhancing Autotuning Capability with a History Database", "normalizedTitle": "Enhancing Autotuning Capability with a History Database", "abstract": "Autotuning is gaining importance to achieve the best possible performance for exascale applications. The performance of an autotuner usually depends on the amount of performance data collected for the application, however, collecting performance data for large-scale applications is oftentimes an expensive and daunting task. This paper presents an autotuner database, which we call a history database, for enhancing the reusability and reproducibility of performance data. The history database is built into a publicly available autotuner called GPTune, and allows users to store performance data obtained from autotuning and download historical performance data provided by the same or other users. The database not only allows reuse of the best available tuning results for widely used codes but also enables transfer learning that can leverage knowledge of pre-trained performance models. An evaluation shows that, for ScaLAPACK&#x0027;s PDGEQRF routine, a transfer learning approach using the history database can attain up to 33% better tuning results compared to single task learning without using prior knowledge, on 2,048 cores of NERSC&#x0027;s Cori supercomputer.", "abstracts": [ { "abstractType": "Regular", "content": "Autotuning is gaining importance to achieve the best possible performance for exascale applications. 
The performance of an autotuner usually depends on the amount of performance data collected for the application, however, collecting performance data for large-scale applications is oftentimes an expensive and daunting task. This paper presents an autotuner database, which we call a history database, for enhancing the reusability and reproducibility of performance data. The history database is built into a publicly available autotuner called GPTune, and allows users to store performance data obtained from autotuning and download historical performance data provided by the same or other users. The database not only allows reuse of the best available tuning results for widely used codes but also enables transfer learning that can leverage knowledge of pre-trained performance models. An evaluation shows that, for ScaLAPACK&#x0027;s PDGEQRF routine, a transfer learning approach using the history database can attain up to 33% better tuning results compared to single task learning without using prior knowledge, on 2,048 cores of NERSC&#x0027;s Cori supercomputer.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Autotuning is gaining importance to achieve the best possible performance for exascale applications. The performance of an autotuner usually depends on the amount of performance data collected for the application, however, collecting performance data for large-scale applications is oftentimes an expensive and daunting task. This paper presents an autotuner database, which we call a history database, for enhancing the reusability and reproducibility of performance data. The history database is built into a publicly available autotuner called GPTune, and allows users to store performance data obtained from autotuning and download historical performance data provided by the same or other users. 
The database not only allows reuse of the best available tuning results for widely used codes but also enables transfer learning that can leverage knowledge of pre-trained performance models. An evaluation shows that, for ScaLAPACK's PDGEQRF routine, a transfer learning approach using the history database can attain up to 33% better tuning results compared to single task learning without using prior knowledge, on 2,048 cores of NERSC's Cori supercomputer.", "fno": "386000a249", "keywords": [ "Data Handling", "Learning Artificial Intelligence", "Parallel Processing", "Autotuning Capability", "History Database", "Exascale Applications", "Autotuner Database", "Historical Performance Data", "Pre Trained Performance Models", "Sca LAPACK PDGEQRF Routine", "Transfer Learning Approach", "NERSC Cori Supercomputer", "GP Tune", "Codes", "Databases", "Multicore Processing", "Transfer Learning", "Supercomputers", "Reproducibility Of Results", "History", "Autotuning", "Transfer Learning", "Crowd Tuning", "Exascale Computing Project" ], "authors": [ { "affiliation": "University of California, Berkeley,Department of Electrical Engineering and Computer Sciences,Berkeley,CA,USA", "fullName": "Younghyun Cho", "givenName": "Younghyun", "surname": "Cho", "__typename": "ArticleAuthorType" }, { "affiliation": "University of California, Berkeley,Department of Electrical Engineering and Computer Sciences,Berkeley,CA,USA", "fullName": "James W. Demmel", "givenName": "James W.", "surname": "Demmel", "__typename": "ArticleAuthorType" }, { "affiliation": "Lawrence Berkeley National Laboratory,Computational Research Division,Berkeley,CA,USA", "fullName": "Xiaoye S. 
Li", "givenName": "Xiaoye S.", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": "Lawrence Berkeley National Laboratory,Computational Research Division,Berkeley,CA,USA", "fullName": "Yang Liu", "givenName": "Yang", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": "Lawrence Berkeley National Laboratory,Computational Research Division,Berkeley,CA,USA", "fullName": "Hengrui Luo", "givenName": "Hengrui", "surname": "Luo", "__typename": "ArticleAuthorType" } ], "idPrefix": "mcsoc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-12-01T00:00:00", "pubType": "proceedings", "pages": "249-257", "year": "2021", "issn": null, "isbn": "978-1-6654-3860-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "386000a242", "articleId": "1AIMVAdXbBC", "__typename": "AdjacentArticleType" }, "next": { "fno": "386000a258", "articleId": "1AIN0v0BiOA", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/sc/2009/744/0/06375564", "title": "Autotuning multigrid with PetaBricks", "doi": null, "abstractUrl": "/proceedings-article/sc/2009/06375564/12OmNrGKesq", "parentPublication": { "id": "proceedings/sc/2009/744/0", "title": "2009 SC Conference on High Performance Computing Networking, Storage and Analysis", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipdps/2017/3914/0/07967118", "title": "Autotuning Stencil Computations with Structural Ordinal Regression Learning", "doi": null, "abstractUrl": "/proceedings-article/ipdps/2017/07967118/12OmNvSKNM4", "parentPublication": { "id": "proceedings/ipdps/2017/3914/0", "title": "2017 IEEE International Parallel and Distributed Processing Symposium (IPDPS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/med/2006/1/0/04124895", "title": "Autotuning Autopilots for Micro-ROVs", "doi": null, "abstractUrl": "/proceedings-article/med/2006/04124895/12OmNwe2IwK", "parentPublication": { "id": "proceedings/med/2006/1/0", "title": "Proceedings of the 14th Mediterranean Conference on Control and Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipdpsw/2017/3408/0/07965198", "title": "Online-Autotuning in the Presence of Algorithmic Choice", "doi": null, "abstractUrl": "/proceedings-article/ipdpsw/2017/07965198/12OmNxA3YWm", "parentPublication": { "id": "proceedings/ipdpsw/2017/3408/0", "title": "2017 IEEE International Parallel and Distributed Processing Symposium: Workshops (IPDPSW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpads/2014/7615/0/07097851", "title": "Toward multi-target autotuning for accelerators", "doi": null, "abstractUrl": "/proceedings-article/icpads/2014/07097851/12OmNxdVh0t", "parentPublication": { "id": "proceedings/icpads/2014/7615/0", "title": "2014 20th IEEE International Conference on Parallel and Distributed Systems (ICPADS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cse/2015/8297/0/8297a343", "title": "ANTAREX -- AutoTuning and Adaptivity appRoach for Energy Efficient eXascale HPC Systems", "doi": null, "abstractUrl": "/proceedings-article/cse/2015/8297a343/12OmNzwpUiu", "parentPublication": { "id": "proceedings/cse/2015/8297/0", "title": "2015 IEEE 18th International Conference on Computational Science and Engineering (CSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icse-companion/2018/5663/0/566301a436", "title": "Poster: Autotuning PostgreSQL: A Blueprint for Successful Autotuning of Real-World Applications", "doi": null, "abstractUrl": 
"/proceedings-article/icse-companion/2018/566301a436/13bd1eY1x3I", "parentPublication": { "id": "proceedings/icse-companion/2018/5663/0", "title": "2018 IEEE/ACM 40th International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcc-smartcity-dss/2017/2588/0/08291913", "title": "Revisiting Online Autotuning for Sparse-Matrix Vector Multiplication Kernels on Next-Generation Architectures", "doi": null, "abstractUrl": "/proceedings-article/hpcc-smartcity-dss/2017/08291913/17D45XtvpeW", "parentPublication": { "id": "proceedings/hpcc-smartcity-dss/2017/2588/0", "title": "2017 IEEE 19th International Conference on High Performance Computing and Communications; IEEE 15th International Conference on Smart City; IEEE 3rd International Conference on Data Science and Systems (HPCC/SmartCity/DSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cluster/2022/9856/0/985600a381", "title": "HPC Storage Service Autotuning Using Variational- Autoencoder -Guided Asynchronous Bayesian Optimization", "doi": null, "abstractUrl": "/proceedings-article/cluster/2022/985600a381/1HzBvW5yNNu", "parentPublication": { "id": "proceedings/cluster/2022/9856/0", "title": "2022 IEEE International Conference on Cluster Computing (CLUSTER)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icde/2021/9184/0/918400c377", "title": "Nullius in Verba: Reproducibility for Database Systems Research, Revisited", "doi": null, "abstractUrl": "/proceedings-article/icde/2021/918400c377/1uGXQl1yAeI", "parentPublication": { "id": "proceedings/icde/2021/9184/0", "title": "2021 IEEE 37th International Conference on Data Engineering (ICDE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNz4Bdh6", "title": "2014 14th IEEE/ACM International Symposium on Cluster, Cloud and Grid Computing (CCGrid)", "acronym": "ccgrid", "groupId": "1000093", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNAo45FY", "doi": "10.1109/CCGrid.2014.59", "title": "A Robust and Fast Reconstruction Framework for Noisy and Large Point Cloud Data", "normalizedTitle": "A Robust and Fast Reconstruction Framework for Noisy and Large Point Cloud Data", "abstract": "In this paper we present a robust reconstruction framework on noisy and large point cloud data. Though Poisson reconstruction performs well in recovering the surface from noisy point cloud data, it's problematic to reconstruct underlying surface from large cloud data, especially on a general processor. An inaccurate estimation of point normal for noisy and large dataset would result in local distortion on the reconstructed mesh. We adopt a systematical combination of Poisson-disk sampling, normal estimation and Poisson reconstruction to avoid the inaccuracy of normal calculated from k-nearest neighbors. With the fewer dataset obtained by sampling on original points, the normal estimated is more reliable for subsequent Poisson reconstruction and the time spent in normal estimation and reconstruction is much less. We demonstrate the effectiveness of the framework in recovering topology and geometry information when dealing with point cloud data from real world. The experiment results indicate that the framework is superior to Poisson reconstruction directly on raw point dataset in the aspects of time consumption and visual fidelity.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper we present a robust reconstruction framework on noisy and large point cloud data. 
Though Poisson reconstruction performs well in recovering the surface from noisy point cloud data, it's problematic to reconstruct underlying surface from large cloud data, especially on a general processor. An inaccurate estimation of point normal for noisy and large dataset would result in local distortion on the reconstructed mesh. We adopt a systematical combination of Poisson-disk sampling, normal estimation and Poisson reconstruction to avoid the inaccuracy of normal calculated from k-nearest neighbors. With the fewer dataset obtained by sampling on original points, the normal estimated is more reliable for subsequent Poisson reconstruction and the time spent in normal estimation and reconstruction is much less. We demonstrate the effectiveness of the framework in recovering topology and geometry information when dealing with point cloud data from real world. The experiment results indicate that the framework is superior to Poisson reconstruction directly on raw point dataset in the aspects of time consumption and visual fidelity.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper we present a robust reconstruction framework on noisy and large point cloud data. Though Poisson reconstruction performs well in recovering the surface from noisy point cloud data, it's problematic to reconstruct underlying surface from large cloud data, especially on a general processor. An inaccurate estimation of point normal for noisy and large dataset would result in local distortion on the reconstructed mesh. We adopt a systematical combination of Poisson-disk sampling, normal estimation and Poisson reconstruction to avoid the inaccuracy of normal calculated from k-nearest neighbors. With the fewer dataset obtained by sampling on original points, the normal estimated is more reliable for subsequent Poisson reconstruction and the time spent in normal estimation and reconstruction is much less. 
We demonstrate the effectiveness of the framework in recovering topology and geometry information when dealing with point cloud data from real world. The experiment results indicate that the framework is superior to Poisson reconstruction directly on raw point dataset in the aspects of time consumption and visual fidelity.", "fno": "2784a828", "keywords": [ "Surface Reconstruction", "Surface Treatment", "Estimation", "Noise Measurement", "Noise", "Robustness", "Vectors", "Poisson Reconstruction", "Poisson Disk Sampling", "Normal Estimation", "K Nearest Neighbors" ], "authors": [ { "affiliation": null, "fullName": "Xiang Feng", "givenName": "Xiang", "surname": "Feng", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Xiaoqing Yu", "givenName": "Xiaoqing", "surname": "Yu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Wanggen Wan", "givenName": "Wanggen", "surname": "Wan", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Fabien Pfaender", "givenName": "Fabien", "surname": "Pfaender", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "J. Alfredo Sanchez", "givenName": "J. 
Alfredo", "surname": "Sanchez", "__typename": "ArticleAuthorType" } ], "idPrefix": "ccgrid", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-05-01T00:00:00", "pubType": "proceedings", "pages": "828-836", "year": "2014", "issn": null, "isbn": "978-1-4799-2784-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "2784a819", "articleId": "12OmNyGbIlE", "__typename": "AdjacentArticleType" }, "next": { "fno": "2784a837", "articleId": "12OmNBSjIY6", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/crv/2013/4983/0/4983a098", "title": "Robust Outlier Detection and Saliency Features Estimation in Point Cloud Data", "doi": null, "abstractUrl": "/proceedings-article/crv/2013/4983a098/12OmNAHmOqY", "parentPublication": { "id": "proceedings/crv/2013/4983/0", "title": "2013 International Conference on Computer and Robot Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pbg/2005/20/0/01500321", "title": "Robust filtering of noisy scattered point data", "doi": null, "abstractUrl": "/proceedings-article/pbg/2005/01500321/12OmNBRbkoQ", "parentPublication": { "id": "proceedings/pbg/2005/20/0", "title": "Point-Based Graphics 2005", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2015/1986/0/1986a313", "title": "Out-of-Core Surface Reconstruction from Large Point Sets for Infrastructure Inspection", "doi": null, "abstractUrl": "/proceedings-article/crv/2015/1986a313/12OmNwp74Lc", "parentPublication": { "id": "proceedings/crv/2015/1986/0", "title": "2015 12th Conference on Computer and Robot Vision (CRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460427", "title": "Shape reconstruction 
with globally-optimized surface point selection", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460427/12OmNwtEEGb", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2013/2840/0/2840a969", "title": "Point-Based 3D Reconstruction of Thin Objects", "doi": null, "abstractUrl": "/proceedings-article/iccv/2013/2840a969/12OmNx7G68H", "parentPublication": { "id": "proceedings/iccv/2013/2840/0", "title": "2013 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2016/5407/0/5407a083", "title": "Robust Feature-Preserving Denoising of 3D Point Clouds", "doi": null, "abstractUrl": "/proceedings-article/3dv/2016/5407a083/12OmNyRxFIQ", "parentPublication": { "id": "proceedings/3dv/2016/5407/0", "title": "2016 Fourth International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2016/5407/0/5407a118", "title": "Point Cloud Noise and Outlier Removal for Image-Based 3D Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/3dv/2016/5407a118/12OmNzsrwoo", "parentPublication": { "id": "proceedings/3dv/2016/5407/0", "title": "2016 Fourth International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcc-smartcity-dss/2017/2588/0/08291962", "title": "Integrated Quality Mesh Generation for Poisson Surface Reconstruction in HPC Applications", "doi": null, "abstractUrl": "/proceedings-article/hpcc-smartcity-dss/2017/08291962/17D45VsBTYE", "parentPublication": { "id": "proceedings/hpcc-smartcity-dss/2017/2588/0", "title": "2017 IEEE 19th International Conference on High Performance Computing 
and Communications; IEEE 15th International Conference on Smart City; IEEE 3rd International Conference on Data Science and Systems (HPCC/SmartCity/DSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2023/01/09693131", "title": "Refine-Net: Normal Refinement Neural Network for Noisy Point Clouds", "doi": null, "abstractUrl": "/journal/tp/2023/01/09693131/1As6TjLcxmU", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2020/9360/0/09150989", "title": "REIN: Flexible Mesh Generation from Point Clouds", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2020/09150989/1lPHzgCZfBm", "parentPublication": { "id": "proceedings/cvprw/2020/9360/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwMXnue", "title": "Future Generation Communication and Networking Symposia, International Conference on", "acronym": "fgcns", "groupId": "1002678", "volume": "3", "displayVolume": "3", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNrAv3Fi", "doi": "10.1109/FGCNS.2008.10", "title": "Noise Intensity-Based Denoising of Point-Sampled Geometry", "normalizedTitle": "Noise Intensity-Based Denoising of Point-Sampled Geometry", "abstract": "A denoising algorithm for point-sampled geometry is proposed based on noise intensity. The noise intensity of each point on point-sampled geometry (PSG) is first measured by using a combined criterion. Based on mean shift clustering, the PSG is then clustered in terms of the local geometry-features similarity. According to the cluster to which a sample point belongs, a moving least squares surface is constructed, and in combination with noise intensity, the PSG is finally denoised. Some experimental results demonstrate that the algorithm is robust, and can denoise the noise efficiently while preserving the surface features.", "abstracts": [ { "abstractType": "Regular", "content": "A denoising algorithm for point-sampled geometry is proposed based on noise intensity. The noise intensity of each point on point-sampled geometry (PSG) is first measured by using a combined criterion. Based on mean shift clustering, the PSG is then clustered in terms of the local geometry-features similarity. According to the cluster to which a sample point belongs, a moving least squares surface is constructed, and in combination with noise intensity, the PSG is finally denoised. Some experimental results demonstrate that the algorithm is robust, and can denoise the noise efficiently while preserving the surface features.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A denoising algorithm for point-sampled geometry is proposed based on noise intensity. 
The noise intensity of each point on point-sampled geometry (PSG) is first measured by using a combined criterion. Based on mean shift clustering, the PSG is then clustered in terms of the local geometry-features similarity. According to the cluster to which a sample point belongs, a moving least squares surface is constructed, and in combination with noise intensity, the PSG is finally denoised. Some experimental results demonstrate that the algorithm is robust, and can denoise the noise efficiently while preserving the surface features.", "fno": "3546c027", "keywords": [ "Noise Intensity", "Mean Shift Clustering", "Moving Least Squares Surfaces", "Point Sampled Geometry Denoising" ], "authors": [ { "affiliation": null, "fullName": "Renfang Wang", "givenName": "Renfang", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jifang Li", "givenName": "Jifang", "surname": "Li", "__typename": "ArticleAuthorType" } ], "idPrefix": "fgcns", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-12-01T00:00:00", "pubType": "proceedings", "pages": "27-30", "year": "2008", "issn": null, "isbn": "978-0-7695-3546-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3546c090", "articleId": "12OmNwswg5O", "__typename": "AdjacentArticleType" }, "next": { "fno": "3546c031", "articleId": "12OmNwJybNz", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/smi/2004/2075/0/20750233", "title": "Robust Watermarking of Point-Sampled Geometry", "doi": null, "abstractUrl": "/proceedings-article/smi/2004/20750233/12OmNBQ2VYF", "parentPublication": { "id": "proceedings/smi/2004/2075/0", "title": "Proceedings. 
Shape Modeling International 2004", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iwcse/2009/3881/2/3881b591", "title": "Voice Activity Detection Based on Harmonic Intensity under Complicated Noise Environment", "doi": null, "abstractUrl": "/proceedings-article/iwcse/2009/3881b591/12OmNBrlPDk", "parentPublication": { "id": "proceedings/iwcse/2009/3881/2", "title": "Computer Science and Engineering, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pg/2004/2234/0/22340152", "title": "Radiosity for Point-Sampled Geometry", "doi": null, "abstractUrl": "/proceedings-article/pg/2004/22340152/12OmNqG0T5p", "parentPublication": { "id": "proceedings/pg/2004/2234/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wgec/2009/3899/0/3899a574", "title": "Algorithm for 3D Point Cloud Denoising", "doi": null, "abstractUrl": "/proceedings-article/wgec/2009/3899a574/12OmNqGA52e", "parentPublication": { "id": "proceedings/wgec/2009/3899/0", "title": "Genetic and Evolutionary Computing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgiv/2006/2606/0/26060522", "title": "Environment Lighting for Point Sampled Geometry", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2006/26060522/12OmNqIQSkQ", "parentPublication": { "id": "proceedings/cgiv/2006/2606/0", "title": "International Conference on Computer Graphics, Imaging and Visualisation (CGIV'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgiv/2006/2606/0/26060353", "title": "Environment Lighting for Point Sampled Geometry", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2006/26060353/12OmNvDqsJ0", "parentPublication": { "id": 
"proceedings/cgiv/2006/2606/0", "title": "International Conference on Computer Graphics, Imaging and Visualisation (CGIV'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icat/2006/2754/0/27540260", "title": "Efficient Metamorphosis of Point-Sampled Geometry", "doi": null, "abstractUrl": "/proceedings-article/icat/2006/27540260/12OmNwFRp7y", "parentPublication": { "id": "proceedings/icat/2006/2754/0", "title": "16th International Conference on Artificial Reality and Telexistence--Workshops (ICAT'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aici/2009/3816/4/3816d491", "title": "Improved on Maximum Intensity Projection", "doi": null, "abstractUrl": "/proceedings-article/aici/2009/3816d491/12OmNy6ZrZ3", "parentPublication": { "id": "proceedings/aici/2009/3816/4", "title": "2009 International Conference on Artificial Intelligence and Computational Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iscv/1995/7190/0/71900223", "title": "Matching and recognition using deformable intensity surfaces", "doi": null, "abstractUrl": "/proceedings-article/iscv/1995/71900223/12OmNyFU74f", "parentPublication": { "id": "proceedings/iscv/1995/7190/0", "title": "Computer Vision, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tc/2013/04/ttc2013040631", "title": "An Efficient Denoising Architecture for Removal of Impulse Noise in Images", "doi": null, "abstractUrl": "/journal/tc/2013/04/ttc2013040631/13rRUxASugP", "parentPublication": { "id": "trans/tc", "title": "IEEE Transactions on Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwD1pU4", "title": "2010 Third International Symposium on Information Science and Engineering", "acronym": "isise", "groupId": "1002561", "volume": "0", "displayVolume": "0", "year": "2010", "__typename": "ProceedingType" }, "article": { "id": "12OmNvSbBuL", "doi": "10.1109/ISISE.2010.84", "title": "Statistic Model of the Spine in Three-Dimension Geometry", "normalizedTitle": "Statistic Model of the Spine in Three-Dimension Geometry", "abstract": "The study of the statistic model of the spine in three-dimension (3-D) geometry aims to provide a scientific basis for the spine and vertebra related medical surgery. In this paper, we adopt the Active Sharpe Model (ASM) to build a spinal statistical model. That is, we first locate and mark the feature points of the three-dimensional reconstructed medical Computed Tomography images, so as to obtain the shape matrix of each spine sample. Second, we align and register the shape matrix in the sample set with Iterative Closest Point (ICP). Third, we train the samples with Principal Component Analysis (PCA) and build the spinal statistical model in 3-D geometry. Finally, we evaluate the proposed model.", "abstracts": [ { "abstractType": "Regular", "content": "The study of the statistic model of the spine in three-dimension (3-D) geometry aims to provide a scientific basis for the spine and vertebra related medical surgery. In this paper, we adopt the Active Sharpe Model (ASM) to build a spinal statistical model. That is, we first locate and mark the feature points of the three-dimensional reconstructed medical Computed Tomography images, so as to obtain the shape matrix of each spine sample. Second, we align and register the shape matrix in the sample set with Iterative Closest Point (ICP). Third, we train the samples with Principal Component Analysis (PCA) and build the spinal statistical model in 3-D geometry. 
Finally, we evaluate the proposed model.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The study of the statistic model of the spine in three-dimension (3-D) geometry aims to provide a scientific basis for the spine and vertebra related medical surgery. In this paper, we adopt the Active Sharpe Model (ASM) to build a spinal statistical model. That is, we first locate and mark the feature points of the three-dimensional reconstructed medical Computed Tomography images, so as to obtain the shape matrix of each spine sample. Second, we align and register the shape matrix in the sample set with Iterative Closest Point (ICP). Third, we train the samples with Principal Component Analysis (PCA) and build the spinal statistical model in 3-D geometry. Finally, we evaluate the proposed model.", "fno": "4360a066", "keywords": [ "Spine", "Active Shape Model", "Three Dimensional Model", "Principal Component Analysis" ], "authors": [ { "affiliation": null, "fullName": "Jun Dai", "givenName": "Jun", "surname": "Dai", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Bin Yu", "givenName": "Bin", "surname": "Yu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Ying Wang", "givenName": "Ying", "surname": "Wang", "__typename": "ArticleAuthorType" } ], "idPrefix": "isise", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2010-12-01T00:00:00", "pubType": "proceedings", "pages": "66-70", "year": "2010", "issn": null, "isbn": "978-0-7695-4360-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4360a062", "articleId": "12OmNvDqsM8", "__typename": "AdjacentArticleType" }, "next": { "fno": "4360a071", "articleId": "12OmNBhHtbJ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icmtma/2015/7143/0/7143b022", "title": 
"Structure Optimization of a Bi-planar Parallel Mechanism for Spine Surgeries", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2015/7143b022/12OmNAoDibT", "parentPublication": { "id": "proceedings/icmtma/2015/7143/0", "title": "2015 Seventh International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/phycon/2003/7939/1/79390276", "title": "Nonlinear switching dynamics in surface electromyography of the spine", "doi": null, "abstractUrl": "/proceedings-article/phycon/2003/79390276/12OmNB9t6rR", "parentPublication": { "id": "proceedings/phycon/2003/7939/1", "title": "Physics and Control, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccis/2010/4270/0/4270a420", "title": "Study on Cervical Spine Stresses Based on Three-Dimensional Finite Element Method", "doi": null, "abstractUrl": "/proceedings-article/iccis/2010/4270a420/12OmNwCsdOJ", "parentPublication": { "id": "proceedings/iccis/2010/4270/0", "title": "2010 International Conference on Computational and Information Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isms/2013/4963/0/4963a088", "title": "Neural Network Based Spinal Age Estimation Using Lumbar Spine Magnetic Resonance Images (MRI)", "doi": null, "abstractUrl": "/proceedings-article/isms/2013/4963a088/12OmNwuvrUP", "parentPublication": { "id": "proceedings/isms/2013/4963/0", "title": "Intelligent Systems, Modelling and Simulation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/1988/0878/0/00028399", "title": "The processing and recognition of X-rays of spine tumor", "doi": null, "abstractUrl": "/proceedings-article/icpr/1988/00028399/12OmNy7QfmR", "parentPublication": { "id": 
"proceedings/icpr/1988/0878/0", "title": "9th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/1991/02/mcg1991020029", "title": "A Kinematic Model of the Human Spine and Torso", "doi": null, "abstractUrl": "/magazine/cg/1991/02/mcg1991020029/13rRUxBrGk1", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2018/6857/0/685700a194", "title": "Two Staged Machine Learning Network for Spine Segmentation and Recognition", "doi": null, "abstractUrl": "/proceedings-article/ism/2018/685700a194/17D45VTRopr", "parentPublication": { "id": "proceedings/ism/2018/6857/0", "title": "2018 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aemcse/2022/8474/0/847400a696", "title": "Multi-perspectives 2D Spine CT images Segmentation of 3D Fuse Algorithm", "doi": null, "abstractUrl": "/proceedings-article/aemcse/2022/847400a696/1IlOgb601Fe", "parentPublication": { "id": "proceedings/aemcse/2022/8474/0", "title": "2022 5th International Conference on Advanced Electronic Materials, Computers and Software Engineering (AEMCSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdh/2020/9234/0/923400a294", "title": "Automatic diagnosis of disc herniation based on DenseNet fusion model", "doi": null, "abstractUrl": "/proceedings-article/icdh/2020/923400a294/1uGY2Z1VryM", "parentPublication": { "id": "proceedings/icdh/2020/9234/0", "title": "2020 8th International Conference on Digital Home (ICDH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900f276", "title": "Automatic Vertebra Localization and Identification in CT by Spine 
Rectification and Anatomically-constrained Optimization", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900f276/1yeKWTUY132", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1G9DtzCwrjW", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "acronym": "icme", "groupId": "1000477", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1G9DFQXOSME", "doi": "10.1109/ICME52920.2022.9859723", "title": "Deep Geometry Post-Processing for Decompressed Point Clouds", "normalizedTitle": "Deep Geometry Post-Processing for Decompressed Point Clouds", "abstract": "Point cloud compression plays a crucial role in reducing the huge cost of data storage and transmission. However, distortions can be introduced into the decompressed point clouds due to quantization. In this paper, we propose a novel learning-based post-processing method to enhance the decompressed point clouds. Specifically, a voxelized point cloud is first divided into small cubes. Then, a 3D convolutional network is proposed to predict the occupancy probability for each location of a cube. We leverage both local and global contexts by generating multi-scale probabilities. These probabilities are progressively summed to predict the results in a coarse-to-fine manner. Finally, we obtain the geometry-refined point clouds based on the predicted probabilities. Different from previous methods, we deal with decompressed point clouds with huge variety of distortions using a single model. Experimental results show that the proposed method can significantly improve the quality of the decompressed point clouds, achieving 9.30dB BDPSNR gain on three representative datasets on average.", "abstracts": [ { "abstractType": "Regular", "content": "Point cloud compression plays a crucial role in reducing the huge cost of data storage and transmission. However, distortions can be introduced into the decompressed point clouds due to quantization. In this paper, we propose a novel learning-based post-processing method to enhance the decompressed point clouds. 
Specifically, a voxelized point cloud is first divided into small cubes. Then, a 3D convolutional network is proposed to predict the occupancy probability for each location of a cube. We leverage both local and global contexts by generating multi-scale probabilities. These probabilities are progressively summed to predict the results in a coarse-to-fine manner. Finally, we obtain the geometry-refined point clouds based on the predicted probabilities. Different from previous methods, we deal with decompressed point clouds with huge variety of distortions using a single model. Experimental results show that the proposed method can significantly improve the quality of the decompressed point clouds, achieving 9.30dB BDPSNR gain on three representative datasets on average.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Point cloud compression plays a crucial role in reducing the huge cost of data storage and transmission. However, distortions can be introduced into the decompressed point clouds due to quantization. In this paper, we propose a novel learning-based post-processing method to enhance the decompressed point clouds. Specifically, a voxelized point cloud is first divided into small cubes. Then, a 3D convolutional network is proposed to predict the occupancy probability for each location of a cube. We leverage both local and global contexts by generating multi-scale probabilities. These probabilities are progressively summed to predict the results in a coarse-to-fine manner. Finally, we obtain the geometry-refined point clouds based on the predicted probabilities. Different from previous methods, we deal with decompressed point clouds with huge variety of distortions using a single model. 
Experimental results show that the proposed method can significantly improve the quality of the decompressed point clouds, achieving 9.30dB BDPSNR gain on three representative datasets on average.", "fno": "09859723", "keywords": [ "Data Compression", "Geometry", "Learning Artificial Intelligence", "Probability", "Solid Modelling", "Video Coding", "Geometry Refined Point Clouds", "Decompressed Point Clouds", "Point Cloud Compression", "Novel Learning Based Post Processing Method", "Voxelized Point Cloud", "Noise Figure 9 3 D B", "Point Cloud Compression", "Convolutional Codes", "Geometry", "Visualization", "Three Dimensional Displays", "Quantization Signal", "Costs", "Point Cloud Compression", "Point Cloud Post Processing", "Geometry Refinement" ], "authors": [ { "affiliation": "Peking University Shenzhen Graduate School,China", "fullName": "Xiaoqing Fan", "givenName": "Xiaoqing", "surname": "Fan", "__typename": "ArticleAuthorType" }, { "affiliation": "Peking University Shenzhen Graduate School,China", "fullName": "Ge Li", "givenName": "Ge", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": "Peng Cheng Laboratory,Shenzhen,China", "fullName": "Dingquan Li", "givenName": "Dingquan", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": "Peking University Shenzhen Graduate School,China", "fullName": "Yurui Ren", "givenName": "Yurui", "surname": "Ren", "__typename": "ArticleAuthorType" }, { "affiliation": "Peking University Shenzhen Graduate School,China", "fullName": "Wei Gao", "givenName": "Wei", "surname": "Gao", "__typename": "ArticleAuthorType" }, { "affiliation": "Peking University Shenzhen Graduate School,China", "fullName": "Thomas H. 
Li", "givenName": "Thomas H.", "surname": "Li", "__typename": "ArticleAuthorType" } ], "idPrefix": "icme", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-07-01T00:00:00", "pubType": "proceedings", "pages": "1-6", "year": "2022", "issn": null, "isbn": "978-1-6654-8563-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09859658", "articleId": "1G9EOMTDs3u", "__typename": "AdjacentArticleType" }, "next": { "fno": "09859763", "articleId": "1G9EtdEdhBe", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icme/2022/8563/0/09858927", "title": "A Novel Grid-Based Geometry Compression Framework for Spinning Lidar Point Clouds", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09858927/1G9EN6WL3KE", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859772", "title": "LGP-Net: Local Geometry Preserving Network for Point Cloud Completion", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859772/1G9EQKPLOpO", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mipr/2022/9548/0/954800a096", "title": "Learning to Predict on Octree for Scalable Point Cloud Geometry Coding", "doi": null, "abstractUrl": "/proceedings-article/mipr/2022/954800a096/1Gvddm9kzTO", "parentPublication": { "id": "proceedings/mipr/2022/9548/0", "title": "2022 IEEE 5th International Conference on Multimedia Information Processing and Retrieval (MIPR)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600o4799", "title": "3DAC: Learning Attribute Compression for Point Clouds", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600o4799/1H0LpmYzOeI", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600h213", "title": "Domain Adaptation on Point Clouds via Geometry-Aware Implicits", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600h213/1H1jfiVcaM8", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600s8920", "title": "Surface Representation for Point Clouds", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600s8920/1H1jmGGv0eQ", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigmm/2022/5963/0/596300a021", "title": "Deep Learning-based Point Cloud Joint Geometry and Color Coding: Designing a Perceptually-Driven Differentiable Training Distortion Metric", "doi": null, "abstractUrl": "/proceedings-article/bigmm/2022/596300a021/1JvaM0B3arm", "parentPublication": { "id": "proceedings/bigmm/2022/5963/0", "title": "2022 IEEE Eighth International Conference on Multimedia Big Data (BigMM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600f776", "title": "PointNeuron: 3D Neuron Reconstruction via Geometry and Topology Learning of Point Clouds", 
"doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600f776/1KxUsxFsCfS", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrhciai/2022/9182/0/918200a095", "title": "A Multi-layer Residual Architecture for Point Cloud Geometry Compression", "doi": null, "abstractUrl": "/proceedings-article/vrhciai/2022/918200a095/1LxffyVc0Ew", "parentPublication": { "id": "proceedings/vrhciai/2022/9182/0", "title": "2022 International Conference on Virtual Reality, Human-Computer Interaction and Artificial Intelligence (VRHCIAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300l1959", "title": "LBS Autoencoder: Self-Supervised Fitting of Articulated Meshes to Point Clouds", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300l1959/1gyrapLInSw", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1m3n9N02qgE", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1m3niQJVeOA", "doi": "10.1109/CVPR42600.2020.01325", "title": "Geometry and Learning Co-Supported Normal Estimation for Unstructured Point Cloud", "normalizedTitle": "Geometry and Learning Co-Supported Normal Estimation for Unstructured Point Cloud", "abstract": "In this paper, we propose a normal estimation method for unstructured point cloud. We observe that geometric estimators commonly focus more on feature preservation but are hard to tune parameters and sensitive to noise, while learning-based approaches pursue an overall normal estimation accuracy but cannot well handle challenging regions such as surface edges. This paper presents a novel normal estimation method, under the co-support of geometric estimator and deep learning. To lowering the learning difficulty, we first propose to compute a suboptimal initial normal at each point by searching for a best fitting patch. Based on the computed normal field, we design a normal-based height map network (NH-Net) to fine-tune the suboptimal normals. Qualitative and quantitative evaluations demonstrate the clear improvements of our results over both traditional methods and learning-based methods, in terms of estimation accuracy and feature recovery.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we propose a normal estimation method for unstructured point cloud. We observe that geometric estimators commonly focus more on feature preservation but are hard to tune parameters and sensitive to noise, while learning-based approaches pursue an overall normal estimation accuracy but cannot well handle challenging regions such as surface edges. 
This paper presents a novel normal estimation method, under the co-support of geometric estimator and deep learning. To lowering the learning difficulty, we first propose to compute a suboptimal initial normal at each point by searching for a best fitting patch. Based on the computed normal field, we design a normal-based height map network (NH-Net) to fine-tune the suboptimal normals. Qualitative and quantitative evaluations demonstrate the clear improvements of our results over both traditional methods and learning-based methods, in terms of estimation accuracy and feature recovery.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we propose a normal estimation method for unstructured point cloud. We observe that geometric estimators commonly focus more on feature preservation but are hard to tune parameters and sensitive to noise, while learning-based approaches pursue an overall normal estimation accuracy but cannot well handle challenging regions such as surface edges. This paper presents a novel normal estimation method, under the co-support of geometric estimator and deep learning. To lowering the learning difficulty, we first propose to compute a suboptimal initial normal at each point by searching for a best fitting patch. Based on the computed normal field, we design a normal-based height map network (NH-Net) to fine-tune the suboptimal normals. 
Qualitative and quantitative evaluations demonstrate the clear improvements of our results over both traditional methods and learning-based methods, in terms of estimation accuracy and feature recovery.", "fno": "716800n3235", "keywords": [ "Computational Geometry", "Learning Artificial Intelligence", "Neural Nets", "Normal Distribution", "Solid Modelling", "Surface Fitting", "Unstructured Point Cloud", "Geometric Estimator", "Feature Preservation", "Surface Edges", "Normal Estimation Method", "Deep Learning", "Normal Based Height Map Network", "Suboptimal Normals", "Learning Based Methods", "Three Dimensional Displays", "Estimation", "Noise Measurement", "Principal Component Analysis", "Silicon", "Learning Systems", "Neural Networks" ], "authors": [ { "affiliation": "Nanjing University of Aeronautics and Astronautics; MIIT Key Laboratory of Pattern Analysis and Machine Intelligence", "fullName": "Haoran Zhou", "givenName": "Haoran", "surname": "Zhou", "__typename": "ArticleAuthorType" }, { "affiliation": "Nanjing University of Aeronautics and Astronautics", "fullName": "Honghua Chen", "givenName": "Honghua", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": "Nanjing University of Aeronautics and Astronautics; MIIT Key Laboratory of Pattern Analysis and Machine Intelligence", "fullName": "Yidan Feng", "givenName": "Yidan", "surname": "Feng", "__typename": "ArticleAuthorType" }, { "affiliation": "Shenzhen Institutes of Advanced Technology", "fullName": "Qiong Wang", "givenName": "Qiong", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "The Hong Kong Polytechnic University", "fullName": "Jing Qin", "givenName": "Jing", "surname": "Qin", "__typename": "ArticleAuthorType" }, { "affiliation": "Lingnan University", "fullName": "Haoran Xie", "givenName": "Haoran", "surname": "Xie", "__typename": "ArticleAuthorType" }, { "affiliation": "The Open University of Hong Kong", "fullName": "Fu Lee Wang", "givenName": "Fu Lee", 
"surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "Nanjing University of Aeronautics and Astronautics; MIIT Key Laboratory of Pattern Analysis and Machine Intelligence", "fullName": "Mingqiang Wei", "givenName": "Mingqiang", "surname": "Wei", "__typename": "ArticleAuthorType" }, { "affiliation": "Nanjing University of Aeronautics and Astronautics", "fullName": "Jun Wang", "givenName": "Jun", "surname": "Wang", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-06-01T00:00:00", "pubType": "proceedings", "pages": "13235-13244", "year": "2020", "issn": null, "isbn": "978-1-7281-7168-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "716800n3225", "articleId": "1m3np6lblUA", "__typename": "AdjacentArticleType" }, "next": { "fno": "716800n3245", "articleId": "1m3ot035DBC", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/sibgrapi/2013/5099/0/5099a187", "title": "Normal Correction towards Smoothing Point-Based Surfaces", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2013/5099a187/12OmNwFicSu", "parentPublication": { "id": "proceedings/sibgrapi/2013/5099/0", "title": "2013 XXVI Conference on Graphics, Patterns and Images", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dcc/2003/1896/0/18960362", "title": "Estimation-Quantization Geometry Coding Using Normal Meshes", "doi": null, "abstractUrl": "/proceedings-article/dcc/2003/18960362/12OmNwcUjUY", "parentPublication": { "id": "proceedings/dcc/2003/1896/0", "title": "Data Compression Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2013/02/ttp2013020381", "title": "Iterative Closest Normal Point for 3D Face 
Recognition", "doi": null, "abstractUrl": "/journal/tp/2013/02/ttp2013020381/13rRUx0xQ0F", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2023/01/09693131", "title": "Refine-Net: Normal Refinement Neural Network for Noisy Point Clouds", "doi": null, "abstractUrl": "/journal/tp/2023/01/09693131/1As6TjLcxmU", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2022/7218/0/09859360", "title": "Deep Point Cloud Normal Estimation via Triplet Learning (Demonstration)", "doi": null, "abstractUrl": "/proceedings-article/icmew/2022/09859360/1G4F1NSQSEo", "parentPublication": { "id": "proceedings/icmew/2022/7218/0", "title": "2022 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859844", "title": "Deep Point Cloud Normal Estimation Via Triplet Learning", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859844/1G9EiyfHOKY", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/10091230", "title": "Contrastive Learning for Joint Normal Estimation and Point Cloud Filtering", "doi": null, "abstractUrl": "/journal/tg/5555/01/10091230/1M2IJGotwEU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800c186", "title": "Normal Assisted Stereo 
Depth Estimation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800c186/1m3noJ4DSSI", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/02/09543583", "title": "GeoDualCNN: Geometry-Supporting Dual Convolutional Neural Network for Noisy Point Clouds", "doi": null, "abstractUrl": "/journal/tg/2023/02/09543583/1x4UL7WJCKI", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/04/09609545", "title": "3D-CariGAN: An End-to-End Solution to 3D Caricature Generation From Normal Face Photos", "doi": null, "abstractUrl": "/journal/tg/2023/04/09609545/1yoxJacbZ4I", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNCctfjp", "title": "2008 XXI Brazilian Symposium on Computer Graphics and Image Processing", "acronym": "sibgrapi", "groupId": "1000131", "volume": "0", "displayVolume": "0", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNyKJioj", "doi": "10.1109/SIBGRAPI.2008.40", "title": "PCA-Based 3D Face Photography", "normalizedTitle": "PCA-Based 3D Face Photography", "abstract": "This paper presents a 3D face photography system based on a small set of training facial range images. The training set is composed by 2D texture and 3D range images (i.e. geometry) of a single subject with different facial expressions. The basic idea behind the method is to create texture and geometry spaces based on the training set and transformations to go from one space to the other. The main goal of the proposed approach is to obtain a geometry representation of a given face provided as a texture image, which undergoes a series of transformations through the texture and geometry spaces. Facial feature points are obtained by an active shape model (ASM) extracted from the 2D gray-level images. PCA then is used to represent the face dataset, thus defining an orthonormal basis of texture and range data. An input face is given by a gray-level face image to which the ASM is matched. The extracted ASM is fed to the PCA basis representation and a 3D version of the 2D input image is built. The experimental results on static images and video sequences using seven samples as training dataset show rapid reconstructed 3D faces which maintain spatial coherence similar to the human perception, thus corroborating the efficiency of our approach.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents a 3D face photography system based on a small set of training facial range images. The training set is composed by 2D texture and 3D range images (i.e. geometry) of a single subject with different facial expressions. 
The basic idea behind the method is to create texture and geometry spaces based on the training set and transformations to go from one space to the other. The main goal of the proposed approach is to obtain a geometry representation of a given face provided as a texture image, which undergoes a series of transformations through the texture and geometry spaces. Facial feature points are obtained by an active shape model (ASM) extracted from the 2D gray-level images. PCA then is used to represent the face dataset, thus defining an orthonormal basis of texture and range data. An input face is given by a gray-level face image to which the ASM is matched. The extracted ASM is fed to the PCA basis representation and a 3D version of the 2D input image is built. The experimental results on static images and video sequences using seven samples as training dataset show rapid reconstructed 3D faces which maintain spatial coherence similar to the human perception, thus corroborating the efficiency of our approach.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents a 3D face photography system based on a small set of training facial range images. The training set is composed by 2D texture and 3D range images (i.e. geometry) of a single subject with different facial expressions. The basic idea behind the method is to create texture and geometry spaces based on the training set and transformations to go from one space to the other. The main goal of the proposed approach is to obtain a geometry representation of a given face provided as a texture image, which undergoes a series of transformations through the texture and geometry spaces. Facial feature points are obtained by an active shape model (ASM) extracted from the 2D gray-level images. PCA then is used to represent the face dataset, thus defining an orthonormal basis of texture and range data. An input face is given by a gray-level face image to which the ASM is matched. 
The extracted ASM is fed to the PCA basis representation and a 3D version of the 2D input image is built. The experimental results on static images and video sequences using seven samples as training dataset show rapid reconstructed 3D faces which maintain spatial coherence similar to the human perception, thus corroborating the efficiency of our approach.", "fno": "3358a313", "keywords": [ "Image Texture", "Principal Component Analysis", "PCA Based 3 D Face Photography", "Facial Range Images", "2 D Texture", "3 D Range Images", "Active Shape Model", "2 D Gray Level Images", "Face", "Geometry", "Three Dimensional Displays", "Training", "Image Reconstruction", "Solid Modeling", "Computational Modeling" ], "authors": [ { "affiliation": "IME, Univ. de Sao Paulo, Sao Paulo", "fullName": "Jesús P. Mena-Chalco", "givenName": "Jesús P.", "surname": "Mena-Chalco", "__typename": "ArticleAuthorType" }, { "affiliation": "Inst. Nac. de Mat. Pura e Aplic., Rio de Janeiro", "fullName": "Ives Macêdo", "givenName": "Ives", "surname": "Macêdo", "__typename": "ArticleAuthorType" }, { "affiliation": "Inst. Nac. de Mat. Pura e Aplic., Rio de Janeiro", "fullName": "Luiz Velho", "givenName": "Luiz", "surname": "Velho", "__typename": "ArticleAuthorType" }, { "affiliation": "IME, Univ. de Sao Paulo, Sao Paulo", "fullName": "Roberto M. 
Cesar Jr.", "givenName": "Roberto M.", "surname": "Cesar", "__typename": "ArticleAuthorType" } ], "idPrefix": "sibgrapi", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-10-01T00:00:00", "pubType": "proceedings", "pages": "313-320", "year": "2008", "issn": "1530-1834", "isbn": "978-0-7695-3358-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3358a179", "articleId": "12OmNBfqG6P", "__typename": "AdjacentArticleType" }, "next": { "fno": "3358a189", "articleId": "12OmNBqdr58", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cad-graphics/2013/2576/0/06815035", "title": "Face Image Illumination Transfer through Eye-Relit 3D Basis", "doi": null, "abstractUrl": "/proceedings-article/cad-graphics/2013/06815035/12OmNAgoV7Y", "parentPublication": { "id": "proceedings/cad-graphics/2013/2576/0", "title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/5209b710", "title": "Makeup-Invariant Face Recognition by 3D Face: Modeling and Dual-Tree Complex Wavelet Transform from Women's 2D Real-World Images", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209b710/12OmNs0C9BO", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/5209e708", "title": "Expression-Invariant Face Recognition via 3D Face Reconstruction Using Gabor Filter Bank from a 2D Single Image", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209e708/12OmNvjgWmT", "parentPublication": { "id": 
"proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2012/4836/0/4836a046", "title": "3D Face Reconstruction Based on Geometric Transformation", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2012/4836a046/12OmNx5piSz", "parentPublication": { "id": "proceedings/icvrv/2012/4836/0", "title": "2012 International Conference on Virtual Reality and Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2017/1034/0/1034c512", "title": "Pix2Face: Direct 3D Face Model Estimation", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2017/1034c512/12OmNxWuiy8", "parentPublication": { "id": "proceedings/iccvw/2017/1034/0", "title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457f553", "title": "Learning Detailed Face Reconstruction from a Single Image", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457f553/12OmNxvO05B", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000h346", "title": "Nonlinear 3D Face Morphable Model", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000h346/17D45Xi9rWU", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/6.946E232", "title": "Learning to Restore 3D Face from In-the-Wild Degraded Images", "doi": null, 
"abstractUrl": "/proceedings-article/cvpr/2022/6.946E232/1H1ibAktWTK", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2020/1331/0/09102811", "title": "Expression-Aware Face Reconstruction Via A Dual-Stream Network", "doi": null, "abstractUrl": "/proceedings-article/icme/2020/09102811/1kwr15w4dQQ", "parentPublication": { "id": "proceedings/icme/2020/1331/0", "title": "2020 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800a598", "title": "FaceScape: A Large-Scale High Quality 3D Face Dataset and Detailed Riggable 3D Face Prediction", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800a598/1m3oolNr35C", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1H1gVMlkl32", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1H0Njf7XQtO", "doi": "10.1109/CVPR52688.2022.00610", "title": "StyleMesh: Style Transfer for Indoor 3D Scene Reconstructions", "normalizedTitle": "StyleMesh: Style Transfer for Indoor 3D Scene Reconstructions", "abstract": "We apply style transfer on mesh reconstructions of indoor scenes. This enables VR applications like experiencing 3D environments painted in the style of a favorite artist. Style transfer typically operates on 2D images, making stylization of a mesh challenging. When optimized over a variety of poses, stylization patterns become stretched out and inconsistent in size. On the other hand, model-based 3D style transfer methods exist that allow stylization from a sparse set of images, but they require a network at inference time. To this end, we optimize an explicit texture for the reconstructed mesh of a scene and stylize it jointly from all available input images. Our depth- and angle-aware optimization leverages surface normal and depth data of the underlying mesh to create a uniform and consistent stylization for the whole scene. Our experiments show that our method creates sharp and detailed results for the complete scene without view-dependent artifacts. Through extensive ablation studies, we show that the proposed 3D awareness enables style transfer to be applied to the 3D domain of a mesh. Our method<sup>1</sup><sup>1</sup>https://lukashoel.github.io/stylemesh/ can be used to render a stylized mesh in real-time with traditional rendering pipelines.", "abstracts": [ { "abstractType": "Regular", "content": "We apply style transfer on mesh reconstructions of indoor scenes. This enables VR applications like experiencing 3D environments painted in the style of a favorite artist. 
Style transfer typically operates on 2D images, making stylization of a mesh challenging. When optimized over a variety of poses, stylization patterns become stretched out and inconsistent in size. On the other hand, model-based 3D style transfer methods exist that allow stylization from a sparse set of images, but they require a network at inference time. To this end, we optimize an explicit texture for the reconstructed mesh of a scene and stylize it jointly from all available input images. Our depth- and angle-aware optimization leverages surface normal and depth data of the underlying mesh to create a uniform and consistent stylization for the whole scene. Our experiments show that our method creates sharp and detailed results for the complete scene without view-dependent artifacts. Through extensive ablation studies, we show that the proposed 3D awareness enables style transfer to be applied to the 3D domain of a mesh. Our method<sup>1</sup><sup>1</sup>https://lukashoel.github.io/stylemesh/ can be used to render a stylized mesh in real-time with traditional rendering pipelines.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We apply style transfer on mesh reconstructions of indoor scenes. This enables VR applications like experiencing 3D environments painted in the style of a favorite artist. Style transfer typically operates on 2D images, making stylization of a mesh challenging. When optimized over a variety of poses, stylization patterns become stretched out and inconsistent in size. On the other hand, model-based 3D style transfer methods exist that allow stylization from a sparse set of images, but they require a network at inference time. To this end, we optimize an explicit texture for the reconstructed mesh of a scene and stylize it jointly from all available input images. 
Our depth- and angle-aware optimization leverages surface normal and depth data of the underlying mesh to create a uniform and consistent stylization for the whole scene. Our experiments show that our method creates sharp and detailed results for the complete scene without view-dependent artifacts. Through extensive ablation studies, we show that the proposed 3D awareness enables style transfer to be applied to the 3D domain of a mesh. Our method11https://lukashoel.github.io/stylemesh/ can be used to render a stylized mesh in real-time with traditional rendering pipelines.", "fno": "694600g188", "keywords": [ "Computational Geometry", "Computer Graphics", "Image Reconstruction", "Image Texture", "Mesh Generation", "Rendering Computer Graphics", "Virtual Reality", "Style Transfer", "Indoor 3 D", "Mesh Reconstructions", "Indoor Scenes", "Mesh Challenging", "Stylization Patterns", "Model Based 3 D", "Reconstructed Mesh", "Consistent Stylization", "Complete Scene", "Stylized Mesh", "Surface Reconstruction", "Solid Modeling", "Computer Vision", "Three Dimensional Displays", "Pipelines", "Rendering Computer Graphics", "Real Time Systems" ], "authors": [ { "affiliation": "Technical University of Munich", "fullName": "Lukas Höllein", "givenName": "Lukas", "surname": "Höllein", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Michigan", "fullName": "Justin Johnson", "givenName": "Justin", "surname": "Johnson", "__typename": "ArticleAuthorType" }, { "affiliation": "Technical University of Munich", "fullName": "Matthias Nießner", "givenName": "Matthias", "surname": "Nießner", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-06-01T00:00:00", "pubType": "proceedings", "pages": "6188-6198", "year": "2022", "issn": null, "isbn": "978-1-6654-6946-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": 
"1H0NjagvQli", "name": "pcvpr202269460-09878370s1-mm_694600g188.zip", "size": "11.1 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09878370s1-mm_694600g188.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "694600g177", "articleId": "1H0NScvhUC4", "__typename": "AdjacentArticleType" }, "next": { "fno": "694600g199", "articleId": "1H1lwDdkMuI", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2017/0457/0/0457h178", "title": "Multimodal Transfer: A Hierarchical Deep Convolutional Neural Network for Fast Artistic Style Transfer", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457h178/12OmNxxvAQq", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000i242", "title": "Avatar-Net: Multi-scale Zero-Shot Style Transfer by Feature Decoration", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000i242/17D45VUZMYV", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2018/7315/0/731500a009", "title": "MaeSTrO: A Mobile App for Style Transfer Orchestration Using Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/cw/2018/731500a009/17D45XwUAKX", "parentPublication": { "id": "proceedings/cw/2018/7315/0", "title": "2018 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2022/0915/0/091500c978", "title": "PhotoWCT<sup>2</sup>: Compact Autoencoder for Photorealistic Style Transfer Resulting 
from Blockwise Training and Skip Connections of High-Frequency Residuals", "doi": null, "abstractUrl": "/proceedings-article/wacv/2022/091500c978/1B13XjO9VyE", "parentPublication": { "id": "proceedings/wacv/2022/0915/0", "title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200o4589", "title": "Domain-Aware Universal Style Transfer", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200o4589/1BmEW5hrQNW", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200o4860", "title": "Diverse Image Style Transfer via Invertible Cross-Space Mapping", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200o4860/1BmHe6ICekw", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200o4598", "title": "StyleFormer: Real-time Arbitrary Style Transfer via Parametric Style Composition", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200o4598/1BmL5FTElEI", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300j035", "title": "Photorealistic Style Transfer via Wavelet Transforms", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300j035/1hQqmpD9dy8", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision 
(ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800b857", "title": "Collaborative Distillation for Ultra-Resolution Universal Style Transfer", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800b857/1m3o3Bu9VPa", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900m2191", "title": "Rethinking Style Transfer: From Pixels to Parameterized Brushstrokes", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900m2191/1yeKcTehYzu", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1bzYnKROnN6", "title": "2019 14th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2019)", "acronym": "fg", "groupId": "1000065", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1bzYqBqSube", "doi": "10.1109/FG.2019.8756507", "title": "Photo-Realistic Exemplar-Based Face Ageing", "normalizedTitle": "Photo-Realistic Exemplar-Based Face Ageing", "abstract": "We propose a photo-realistic method for artificially ageing facial photographs by combining learned shape deformations with skin detail transfer between a donor and a receiver face. Facial ageing is a complicated process that most existing face models, such as 3d Morphable Models fail to express, due to lacking correspondence between the wrinkles of different individuals. We propose an exemplar-based approach to face ageing, where we transfer high-frequency details from an older face texture to a younger receiver. By warping the resulting image according to a learned shape ageing deformation, we obtain photo-realistic aged photographs. We evaluate the simulator with human perception experiments showing that we can indeed create results that are perceived to be real.", "abstracts": [ { "abstractType": "Regular", "content": "We propose a photo-realistic method for artificially ageing facial photographs by combining learned shape deformations with skin detail transfer between a donor and a receiver face. Facial ageing is a complicated process that most existing face models, such as 3d Morphable Models fail to express, due to lacking correspondence between the wrinkles of different individuals. We propose an exemplar-based approach to face ageing, where we transfer high-frequency details from an older face texture to a younger receiver. By warping the resulting image according to a learned shape ageing deformation, we obtain photo-realistic aged photographs. 
We evaluate the simulator with human perception experiments showing that we can indeed create results that are perceived to be real.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose a photo-realistic method for artificially ageing facial photographs by combining learned shape deformations with skin detail transfer between a donor and a receiver face. Facial ageing is a complicated process that most existing face models, such as 3d Morphable Models fail to express, due to lacking correspondence between the wrinkles of different individuals. We propose an exemplar-based approach to face ageing, where we transfer high-frequency details from an older face texture to a younger receiver. By warping the resulting image according to a learned shape ageing deformation, we obtain photo-realistic aged photographs. We evaluate the simulator with human perception experiments showing that we can indeed create results that are perceived to be real.", "fno": "08756507", "keywords": [ "Ageing", "Face Recognition", "Image Texture", "Shape Deformations", "Skin Detail Transfer", "Receiver Face", "Facial Ageing", "3 D Morphable Models", "Older Face Texture", "Younger Receiver", "Learned Shape Ageing Deformation", "Photo Realistic Aged Photographs", "Photo Realistic Exemplar", "Face", "Aging", "Shape", "Strain", "Three Dimensional Displays", "Solid Modeling", "Skin" ], "authors": [ { "affiliation": "Departement of Mathematics and Computer Science, University of Basel, Switzerland", "fullName": "Andreas Schneider", "givenName": "Andreas", "surname": "Schneider", "__typename": "ArticleAuthorType" }, { "affiliation": "Departement of Mathematics and Computer Science, University of Basel, Switzerland", "fullName": "Ghazi Bouabene", "givenName": "Ghazi", "surname": "Bouabene", "__typename": "ArticleAuthorType" }, { "affiliation": "L’Oréal Research and Innovation, Chevilly-Larue, France", "fullName": "Ayet Shaiek", "givenName": "Ayet", "surname": "Shaiek", 
"__typename": "ArticleAuthorType" }, { "affiliation": "Departement of Mathematics and Computer Science, University of Basel, Switzerland", "fullName": "Sandro Schönborn", "givenName": "Sandro", "surname": "Schönborn", "__typename": "ArticleAuthorType" }, { "affiliation": "L’Oréal Research and Innovation, Chevilly-Larue, France", "fullName": "Frédéric Flament", "givenName": "Frédéric", "surname": "Flament", "__typename": "ArticleAuthorType" }, { "affiliation": "L’Oréal Research and Innovation, Chevilly-Larue, France", "fullName": "Ghislain François", "givenName": "Ghislain", "surname": "François", "__typename": "ArticleAuthorType" }, { "affiliation": "L’Oréal Research and Innovation, Chevilly-Larue, France", "fullName": "Virginie Rubert", "givenName": "Virginie", "surname": "Rubert", "__typename": "ArticleAuthorType" }, { "affiliation": "Departement of Mathematics and Computer Science, University of Basel, Switzerland", "fullName": "Thomas Vetter", "givenName": "Thomas", "surname": "Vetter", "__typename": "ArticleAuthorType" } ], "idPrefix": "fg", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-05-01T00:00:00", "pubType": "proceedings", "pages": "1-8", "year": "2019", "issn": null, "isbn": "978-1-7281-0089-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08756586", "articleId": "1bzYnWp7xzG", "__typename": "AdjacentArticleType" }, "next": { "fno": "08756593", "articleId": "1bzYo1RbNuw", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/fg/2000/0580/0/05800189", "title": "Face Analysis for the Synthesis of Photo-Realistic Talking Heads", "doi": null, "abstractUrl": "/proceedings-article/fg/2000/05800189/12OmNBpVQ9P", "parentPublication": { "id": "proceedings/fg/2000/0580/0", "title": "Proceedings Fourth IEEE International Conference on Automatic Face and 
Gesture Recognition (Cat. No. PR00580)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2000/0580/0/05800391", "title": "Towards Automatic Face Identification Robust to Ageing Variation", "doi": null, "abstractUrl": "/proceedings-article/fg/2000/05800391/12OmNCbU31b", "parentPublication": { "id": "proceedings/fg/2000/0580/0", "title": "Proceedings Fourth IEEE International Conference on Automatic Face and Gesture Recognition (Cat. No. PR00580)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2012/1611/0/06239208", "title": "Data insufficiency in sketch versus photo face recognition", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2012/06239208/12OmNxuXczC", "parentPublication": { "id": "proceedings/cvprw/2012/1611/0", "title": "2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2019/06/08360505", "title": "CNN-Based Real-Time Dense Face Reconstruction with Inverse-Rendered Photo-Realistic Face Images", "doi": null, "abstractUrl": "/journal/tp/2019/06/08360505/13rRUxC0SFk", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2009/11/ttp2009111955", "title": "Face Photo-Sketch Synthesis and Recognition", "doi": null, "abstractUrl": "/journal/tp/2009/11/ttp2009111955/13rRUxOveaX", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2018/6100/0/610000a612", "title": "Attribute-Centered Loss for Soft-Biometrics Guided Face Sketch-Photo Recognition", "doi": null, "abstractUrl": 
"/proceedings-article/cvprw/2018/610000a612/17D45XreC7p", "parentPublication": { "id": "proceedings/cvprw/2018/6100/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2019/1975/0/197500c097", "title": "Photo-Realistic Facial Texture Transfer", "doi": null, "abstractUrl": "/proceedings-article/wacv/2019/197500c097/18j8PsEFTUs", "parentPublication": { "id": "proceedings/wacv/2019/1975/0", "title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2023/4544/0/10042545", "title": "Weakly-Supervised Photo-realistic Texture Generation for 3D Face Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/fg/2023/10042545/1KOv4OD5vwY", "parentPublication": { "id": "proceedings/fg/2023/4544/0", "title": "2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2020/8128/0/812800a858", "title": "Learning 3D Faces from Photo-Realistic Facial Synthesis", "doi": null, "abstractUrl": "/proceedings-article/3dv/2020/812800a858/1qyxj7UfJIY", "parentPublication": { "id": "proceedings/3dv/2020/8128/0", "title": "2020 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icceai/2021/3960/0/396000a191", "title": "Residual Enhancement Network for Realistic Face Sketch-Photo Synthesis", "doi": null, "abstractUrl": "/proceedings-article/icceai/2021/396000a191/1xqyStBl8qc", "parentPublication": { "id": "proceedings/icceai/2021/3960/0", "title": "2021 International Conference on Computer Engineering and Artificial Intelligence (ICCEAI)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyoiYVr", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNx19k23", "doi": "10.1109/CVPR.2017.686", "title": "The Amazing Mysteries of the Gutter: Drawing Inferences Between Panels in Comic Book Narratives", "normalizedTitle": "The Amazing Mysteries of the Gutter: Drawing Inferences Between Panels in Comic Book Narratives", "abstract": "Visual narrative is often a combination of explicit information and judicious omissions, relying on the viewer to supply missing details. In comics, most movements in time and space are hidden in the gutters between panels. To follow the story, readers logically connect panels together by inferring unseen actions through a process called closure. While computers can now describe the content of natural images, in this paper we examine whether they can understand the closure-driven narratives conveyed by stylized artwork and dialogue in comic book panels. We collect a dataset, COMICS, that consists of over 1.2 million panels (120 GB) paired with automatic textbox transcriptions. An in-depth analysis of COMICS demonstrates that neither text nor image alone can tell a comic book story, so a computer must understand both modalities to keep up with the plot. We introduce three cloze-style tasks that ask models to predict narrative and character-centric aspects of a panel given n preceding panels as context. Various deep neural architectures underperform human baselines on these tasks, suggesting that COMICS contains fundamental challenges for both vision and language.", "abstracts": [ { "abstractType": "Regular", "content": "Visual narrative is often a combination of explicit information and judicious omissions, relying on the viewer to supply missing details. 
In comics, most movements in time and space are hidden in the gutters between panels. To follow the story, readers logically connect panels together by inferring unseen actions through a process called closure. While computers can now describe the content of natural images, in this paper we examine whether they can understand the closure-driven narratives conveyed by stylized artwork and dialogue in comic book panels. We collect a dataset, COMICS, that consists of over 1.2 million panels (120 GB) paired with automatic textbox transcriptions. An in-depth analysis of COMICS demonstrates that neither text nor image alone can tell a comic book story, so a computer must understand both modalities to keep up with the plot. We introduce three cloze-style tasks that ask models to predict narrative and character-centric aspects of a panel given n preceding panels as context. Various deep neural architectures underperform human baselines on these tasks, suggesting that COMICS contains fundamental challenges for both vision and language.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Visual narrative is often a combination of explicit information and judicious omissions, relying on the viewer to supply missing details. In comics, most movements in time and space are hidden in the gutters between panels. To follow the story, readers logically connect panels together by inferring unseen actions through a process called closure. While computers can now describe the content of natural images, in this paper we examine whether they can understand the closure-driven narratives conveyed by stylized artwork and dialogue in comic book panels. We collect a dataset, COMICS, that consists of over 1.2 million panels (120 GB) paired with automatic textbox transcriptions. An in-depth analysis of COMICS demonstrates that neither text nor image alone can tell a comic book story, so a computer must understand both modalities to keep up with the plot. 
We introduce three cloze-style tasks that ask models to predict narrative and character-centric aspects of a panel given n preceding panels as context. Various deep neural architectures underperform human baselines on these tasks, suggesting that COMICS contains fundamental challenges for both vision and language.", "fno": "0457g478", "keywords": [ "Art", "Image Colour Analysis", "Inference Mechanisms", "Interactive Systems", "Learning Artificial Intelligence", "Neural Nets", "Text Detection", "Amazing Mysteries", "Gutter", "Comic Book Narratives", "Visual Narrative", "Comic Book Panels", "COMICS", "Comic Book Story", "Character Centric Aspects", "Closure Driven Narratives", "Stylized Artwork", "COMICS Dataset", "Automatic Textbox Transcriptions", "Cloze Style Tasks", "Deep Neural Architectures", "Visualization", "Optical Character Recognition Software", "Image Segmentation", "Speech", "Predictive Models", "Coherence" ], "authors": [ { "affiliation": null, "fullName": "Mohit Iyyer", "givenName": "Mohit", "surname": "Iyyer", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Varun Manjunatha", "givenName": "Varun", "surname": "Manjunatha", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Anupam Guha", "givenName": "Anupam", "surname": "Guha", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yogarshi Vyas", "givenName": "Yogarshi", "surname": "Vyas", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jordan Boyd-Graber", "givenName": "Jordan", "surname": "Boyd-Graber", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Hal Daumé", "givenName": "Hal", "surname": "Daumé", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Larry Davis", "givenName": "Larry", "surname": "Davis", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": 
"2017-07-01T00:00:00", "pubType": "proceedings", "pages": "6478-6487", "year": "2017", "issn": "1063-6919", "isbn": "978-1-5386-0457-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "0457g469", "articleId": "12OmNxGAL7E", "__typename": "AdjacentArticleType" }, "next": { "fno": "0457g488", "articleId": "12OmNzTppDg", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icdar/2017/3586/3/3586d060", "title": "Comic Story Analysis Based on Genre Classification", "doi": null, "abstractUrl": "/proceedings-article/icdar/2017/3586d060/12OmNrYCXT2", "parentPublication": { "id": "icdar/2017/3586/3", "title": "2017 14th IAPR International Conference on Document Analysis and Recognition (ICDAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/jcdl/2014/5569/0/06970183", "title": "Comic2CEBX: A system for automatic comic content adaptation", "doi": null, "abstractUrl": "/proceedings-article/jcdl/2014/06970183/12OmNwnYG3h", "parentPublication": { "id": "proceedings/jcdl/2014/5569/0", "title": "2014 IEEE/ACM Joint Conference on Digital Libraries (JCDL)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/2013/4999/0/06628793", "title": "eBDtheque: A Representative Database of Comics", "doi": null, "abstractUrl": "/proceedings-article/icdar/2013/06628793/12OmNx5Yvoh", "parentPublication": { "id": "proceedings/icdar/2013/4999/0", "title": "2013 12th International Conference on Document Analysis and Recognition (ICDAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/das/2012/4661/0/4661a424", "title": "Panel and Speech Balloon Extraction from Comic Books", "doi": null, "abstractUrl": "/proceedings-article/das/2012/4661a424/12OmNxYbSZ2", "parentPublication": { "id": 
"proceedings/das/2012/4661/0", "title": "Document Analysis Systems, IAPR International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/2013/4999/0/06628627", "title": "Specific Comic Character Detection Using Local Feature Matching", "doi": null, "abstractUrl": "/proceedings-article/icdar/2013/06628627/12OmNyQpgYV", "parentPublication": { "id": "proceedings/icdar/2013/4999/0", "title": "2013 12th International Conference on Document Analysis and Recognition (ICDAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/2017/3586/3/3586d029", "title": "Segmentation-Free Speech Text Recognition for Comic Books", "doi": null, "abstractUrl": "/proceedings-article/icdar/2017/3586d029/12OmNznkK4z", "parentPublication": { "id": "icdar/2017/3586/3", "title": "2017 14th IAPR International Conference on Document Analysis and Recognition (ICDAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iiai-aai/2022/9755/0/975500a367", "title": "Algorithms for estimation of comic speakers considering reading order of frames and texts", "doi": null, "abstractUrl": "/proceedings-article/iiai-aai/2022/975500a367/1GU75nKhrqg", "parentPublication": { "id": "proceedings/iiai-aai/2022/9755/0", "title": "2022 12th International Congress on Advanced Applied Informatics (IIAI-AAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdarw/2019/5054/1/505401a044", "title": "What do We Expect from Comic Panel Extraction?", "doi": null, "abstractUrl": "/proceedings-article/icdarw/2019/505401a044/1eLyi3DlEyY", "parentPublication": { "id": "proceedings/icdarw/2019/5054/1", "title": "2019 International Conference on Document Analysis and Recognition Workshops (ICDARW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/nicoint/2020/8771/0/09122368", "title": "Method for Creating Motion Comic from Printed Comic", "doi": null, "abstractUrl": "/proceedings-article/nicoint/2020/09122368/1kRSegi0b96", "parentPublication": { "id": "proceedings/nicoint/2020/8771/0", "title": "2020 Nicograph International (NicoInt)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/02/09547737", "title": "ChartStory: Automated Partitioning, Layout, and Captioning of Charts into Comic-Style Narratives", "doi": null, "abstractUrl": "/journal/tg/2023/02/09547737/1x9TL0bvSlq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKisA", "title": "2018 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "acronym": "aivr", "groupId": "1830004", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "17D45Xh13v2", "doi": "10.1109/AIVR.2018.00021", "title": "A Two-Level Planning Framework for Mixed Reality Interactive Narratives with User Engagement", "normalizedTitle": "A Two-Level Planning Framework for Mixed Reality Interactive Narratives with User Engagement", "abstract": "We present an event-based interactive storytelling system for virtual 3D environments that aims to offer free-form user experiences while constraining the narrative to follow author intent. The characters of our stories are represented as smart objects, each having their own state and set of capabilities that they expose to the virtual world. Our narratives are represented as a collection of branching stories, where narrative flow is controlled by author-defined states. A user model is employed to evaluate the user's engagement with smart objects and events, based on proximity, interaction patterns and visibility to the user. A two-level online planning system is designed to find the best narrative trajectory along pre-authored stories, according to the user model, and to generate a story sequence to the best trajectory with Monte Carlo Tree Search. We present the capabilities of our interactive storytelling system on an example story and describe the adaptations required for modeling user engagement in AR and VR applications.", "abstracts": [ { "abstractType": "Regular", "content": "We present an event-based interactive storytelling system for virtual 3D environments that aims to offer free-form user experiences while constraining the narrative to follow author intent. 
The characters of our stories are represented as smart objects, each having their own state and set of capabilities that they expose to the virtual world. Our narratives are represented as a collection of branching stories, where narrative flow is controlled by author-defined states. A user model is employed to evaluate the user's engagement with smart objects and events, based on proximity, interaction patterns and visibility to the user. A two-level online planning system is designed to find the best narrative trajectory along pre-authored stories, according to the user model, and to generate a story sequence to the best trajectory with Monte Carlo Tree Search. We present the capabilities of our interactive storytelling system on an example story and describe the adaptations required for modeling user engagement in AR and VR applications.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present an event-based interactive storytelling system for virtual 3D environments that aims to offer free-form user experiences while constraining the narrative to follow author intent. The characters of our stories are represented as smart objects, each having their own state and set of capabilities that they expose to the virtual world. Our narratives are represented as a collection of branching stories, where narrative flow is controlled by author-defined states. A user model is employed to evaluate the user's engagement with smart objects and events, based on proximity, interaction patterns and visibility to the user. A two-level online planning system is designed to find the best narrative trajectory along pre-authored stories, according to the user model, and to generate a story sequence to the best trajectory with Monte Carlo Tree Search. 
We present the capabilities of our interactive storytelling system on an example story and describe the adaptations required for modeling user engagement in AR and VR applications.", "fno": "926900a100", "keywords": [ "Computer Aided Instruction", "Humanities", "Monte Carlo Methods", "Tree Searching", "User Experience", "User Modelling", "Virtual Reality", "AR Application", "VR Application", "Monte Carlo Tree Search", "User Engagement Model", "Pre Authored Stories", "Narrative Trajectory", "Two Level Online Planning System", "Interaction Patterns", "User Model", "Author Defined States", "Branching Stories", "Smart Objects", "Free Form User Experiences", "Virtual 3 D Environments", "Event Based Interactive Storytelling System", "Mixed Reality Interactive Narratives", "Two Level Planning Framework", "Planning", "Three Dimensional Displays", "Solid Modeling", "Trajectory", "Runtime", "Visualization", "Artificial Intelligence", "Interactive Narratives", "Narrative Generation", "Planning", "Artificial Intelligence", "Monte Carlo Tree Search", "User Perception" ], "authors": [ { "affiliation": null, "fullName": "Manuel Braunschweiler", "givenName": "Manuel", "surname": "Braunschweiler", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Steven Poulakos", "givenName": "Steven", "surname": "Poulakos", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Mubbasir Kapadia", "givenName": "Mubbasir", "surname": "Kapadia", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Robert W. 
Sumner", "givenName": "Robert W.", "surname": "Sumner", "__typename": "ArticleAuthorType" } ], "idPrefix": "aivr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-12-01T00:00:00", "pubType": "proceedings", "pages": "100-107", "year": "2018", "issn": null, "isbn": "978-1-5386-9269-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "926900a092", "articleId": "17D45VTRoDi", "__typename": "AdjacentArticleType" }, "next": { "fno": "926900a108", "articleId": "17D45WrVgeD", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vs-games/2016/2722/0/07590383", "title": "Yoway: Coupling Narrative Structure with Physical Exploration in Multi-Linear Locative Narratives", "doi": null, "abstractUrl": "/proceedings-article/vs-games/2016/07590383/12OmNqN6R42", "parentPublication": { "id": "proceedings/vs-games/2016/2722/0", "title": "2016 8th International Conference on Games and Virtual Worlds for Serious Applications (VS-Games)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2006/03/mcg2006030023", "title": "From Linear Story Generation to Branching Story Graphs", "doi": null, "abstractUrl": "/magazine/cg/2006/03/mcg2006030023/13rRUwInv91", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ci/2014/02/06604407", "title": "Personalized Interactive Narratives via Sequential Recommendation of Plot Points", "doi": null, "abstractUrl": "/journal/ci/2014/02/06604407/13rRUxASuk7", "parentPublication": { "id": "trans/ci", "title": "IEEE Transactions on Computational Intelligence and AI in Games", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/ci/2017/04/07519091", "title": "Leveraging Intention Revision in Narrative Planning to Create Suspenseful Stories", "doi": null, "abstractUrl": "/journal/ci/2017/04/07519091/13rRUxCitLM", "parentPublication": { "id": "trans/ci", "title": "IEEE Transactions on Computational Intelligence and AI in Games", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ci/2014/02/06571208", "title": "Shall i compare thee to another story?: an empirical study of analogy-based story generation", "doi": null, "abstractUrl": "/journal/ci/2014/02/06571208/13rRUxEhFv8", "parentPublication": { "id": "trans/ci", "title": "IEEE Transactions on Computational Intelligence and AI in Games", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ci/2015/04/06874544", "title": "Automated Planning and Player Modeling for Interactive Storytelling", "doi": null, "abstractUrl": "/journal/ci/2015/04/06874544/13rRUxNmPGq", "parentPublication": { "id": "trans/ci", "title": "IEEE Transactions on Computational Intelligence and AI in Games", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ci/2015/01/06815689", "title": "Suspenser: a story generation system for suspense", "doi": null, "abstractUrl": "/journal/ci/2015/01/06815689/13rRUxk89gI", "parentPublication": { "id": "trans/ci", "title": "IEEE Transactions on Computational Intelligence and AI in Games", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/01/08017584", "title": "Visualizing Nonlinear Narratives with Story Curves", "doi": null, "abstractUrl": "/journal/tg/2018/01/08017584/13rRUyueghe", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sbgames/2018/9605/0/960500a167", "title": "Implementation and 
Analysis of a Non-deterministic Drama Manager", "doi": null, "abstractUrl": "/proceedings-article/sbgames/2018/960500a167/17D45WHONqG", "parentPublication": { "id": "proceedings/sbgames/2018/9605/0", "title": "2018 17th Brazilian Symposium on Computer Games and Digital Entertainment (SBGames)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/02/09547737", "title": "ChartStory: Automated Partitioning, Layout, and Captioning of Charts into Comic-Style Narratives", "doi": null, "abstractUrl": "/journal/tg/2023/02/09547737/1x9TL0bvSlq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1grOiRpGmv6", "title": "2019 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "acronym": "aivr", "groupId": "1830004", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1grOlrH5hdK", "doi": "10.1109/AIVR46125.2019.00024", "title": "A Live Storytelling Virtual Reality System with Programmable Cartoon-Style Emotion Embodiment", "normalizedTitle": "A Live Storytelling Virtual Reality System with Programmable Cartoon-Style Emotion Embodiment", "abstract": "Virtual reality (VR) is a promising new medium for immersive storytelling. While previous research works on VR narrative have tried to engage audiences through nice scenes and interactivity, the emerging live streaming shows the role of a presenter, especially the conveyance of emotion, for promoting audience involvement and enjoyment. In this paper, to lower the requirement of emotion embodiment, we borrow experience from cartoon animation and comics, and propose a novel cartoon-style hybrid emotion embodiment model to increase a storyteller's presence during live performance, which contains an avatar with six basic emotions and auxiliary multimodal display to enhance emotion expressing. We further design and implement a system to teleoperate the embodiment model in VR for live storytelling. In particular, 1) we design a novel visual programming tool that allows users to customize emotional effects based on the emotion embodiment model; 2) we design a novel face tracking module to map presenters' emotional states to the avatar in VR. Our lightweight web-based implementation also makes the application very easy to use. We conduct two preliminary qualitative studies to explore the potential of the hybrid model and the storytelling system, including interviews with three experts and a workshop study with local secondary school students. 
Results show the potential of the VR storytelling system for education.", "abstracts": [ { "abstractType": "Regular", "content": "Virtual reality (VR) is a promising new medium for immersive storytelling. While previous research works on VR narrative have tried to engage audiences through nice scenes and interactivity, the emerging live streaming shows the role of a presenter, especially the conveyance of emotion, for promoting audience involvement and enjoyment. In this paper, to lower the requirement of emotion embodiment, we borrow experience from cartoon animation and comics, and propose a novel cartoon-style hybrid emotion embodiment model to increase a storyteller's presence during live performance, which contains an avatar with six basic emotions and auxiliary multimodal display to enhance emotion expressing. We further design and implement a system to teleoperate the embodiment model in VR for live storytelling. In particular, 1) we design a novel visual programming tool that allows users to customize emotional effects based on the emotion embodiment model; 2) we design a novel face tracking module to map presenters' emotional states to the avatar in VR. Our lightweight web-based implementation also makes the application very easy to use. We conduct two preliminary qualitative studies to explore the potential of the hybrid model and the storytelling system, including interviews with three experts and a workshop study with local secondary school students. Results show the potential of the VR storytelling system for education.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Virtual reality (VR) is a promising new medium for immersive storytelling. While previous research works on VR narrative have tried to engage audiences through nice scenes and interactivity, the emerging live streaming shows the role of a presenter, especially the conveyance of emotion, for promoting audience involvement and enjoyment. 
In this paper, to lower the requirement of emotion embodiment, we borrow experience from cartoon animation and comics, and propose a novel cartoon-style hybrid emotion embodiment model to increase a storyteller's presence during live performance, which contains an avatar with six basic emotions and auxiliary multimodal display to enhance emotion expressing. We further design and implement a system to teleoperate the embodiment model in VR for live storytelling. In particular, 1) we design a novel visual programming tool that allows users to customize emotional effects based on the emotion embodiment model; 2) we design a novel face tracking module to map presenters' emotional states to the avatar in VR. Our lightweight web-based implementation also makes the application very easy to use. We conduct two preliminary qualitative studies to explore the potential of the hybrid model and the storytelling system, including interviews with three experts and a workshop study with local secondary school students. 
Results show the potential of the VR storytelling system for education.", "fno": "560400a102", "keywords": [ "Emotion Embodiment", "Social Presence", "Avatar", "Storytelling", "Education", "Visual Programming" ], "authors": [ { "affiliation": "Hong Kong University of Science and Technology", "fullName": "Zhenjie Zhao", "givenName": "Zhenjie", "surname": "Zhao", "__typename": "ArticleAuthorType" }, { "affiliation": "Hong Kong University of Science and Technology", "fullName": "Feng Han", "givenName": "Feng", "surname": "Han", "__typename": "ArticleAuthorType" }, { "affiliation": "Hong Kong University of Science and Technology", "fullName": "Xiaojuan Ma", "givenName": "Xiaojuan", "surname": "Ma", "__typename": "ArticleAuthorType" } ], "idPrefix": "aivr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-12-01T00:00:00", "pubType": "proceedings", "pages": "102-1027", "year": "2019", "issn": null, "isbn": "978-1-7281-5604-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "560400a098", "articleId": "1grOlosY4wg", "__typename": "AdjacentArticleType" }, "next": { "fno": "560400a110", "articleId": "1grOjTHg7QY", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/gmai/2006/2604/0/26040063", "title": "Multiresolution Hierarchy for Real-Time Cartoon-Style Rendering", "doi": null, "abstractUrl": "/proceedings-article/gmai/2006/26040063/12OmNBRbkpR", "parentPublication": { "id": "proceedings/gmai/2006/2604/0", "title": "Geometric Modeling and Imaging - New Trends", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isip/2010/4261/0/4261a495", "title": "A Preliminary Study on Nonverbal Emotion Interaction of Virtual Characters in Cartoon Games", "doi": null, "abstractUrl": 
"/proceedings-article/isip/2010/4261a495/12OmNyv7m1C", "parentPublication": { "id": "proceedings/isip/2010/4261/0", "title": "2010 Third International Symposium on Information Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/pc/2007/03/b3066", "title": "AniDiary: Daily Cartoon-Style Diary Exploits Bayesian Networks", "doi": null, "abstractUrl": "/magazine/pc/2007/03/b3066/13rRUwIF66u", "parentPublication": { "id": "mags/pc", "title": "IEEE Pervasive Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699214", "title": "Storytelling for Cinematic Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699214/19F1P5L3JV6", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/imet/2022/7016/0/09929546", "title": "The Use of Storytelling in Virtual Reality for Studying Empathy: A Review", "doi": null, "abstractUrl": "/proceedings-article/imet/2022/09929546/1HYuWrZKF7G", "parentPublication": { "id": "proceedings/imet/2022/7016/0", "title": "2022 International Conference on Interactive Media, Smart Systems and Emerging Technologies (IMET)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vs-games/2019/4540/0/08864520", "title": "Actors in VR storytelling", "doi": null, "abstractUrl": "/proceedings-article/vs-games/2019/08864520/1e5ZrTyTjc4", "parentPublication": { "id": "proceedings/vs-games/2019/4540/0", "title": "2019 11th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/aivr/2019/5604/0/560400a251", "title": "Live Emoji: A Live Storytelling VR System with Programmable Cartoon-Style Emotion Embodiment", "doi": null, "abstractUrl": "/proceedings-article/aivr/2019/560400a251/1grOk3JbfQk", "parentPublication": { "id": "proceedings/aivr/2019/5604/0", "title": "2019 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09199571", "title": "Construction of the Virtual Embodiment Questionnaire (VEQ)", "doi": null, "abstractUrl": "/journal/tg/2020/12/09199571/1ncgwAshfRS", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a695", "title": "Fantastic Voyage 2021: Using Interactive VR Storytelling to Explain Targeted COVID-19 Vaccine Delivery to Antigen-presenting Cells", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a695/1tnX9sqXl6M", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a065", "title": "The Embodiment of Photorealistic Avatars Influences Female Body Weight Perception in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2021/255600a065/1tuAAOZpdoQ", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNvmowSz", "title": "International Conference on Culture and Computing", "acronym": "culture-computing", "groupId": "1800597", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNC4eSAC", "doi": "10.1109/Culture-Computing.2011.54", "title": "Generative Music Workshop", "normalizedTitle": "Generative Music Workshop", "abstract": "In this paper, we present our practice of Generative Music Workshop (2010-). This workshop is a series of events that reproduce past masterpieces of generative music. The aim of the workshop is historical re-examination of generative works to contribute to recent musical practices with mobile computing devices. We regard that the organization of sounds with environmental elements would be a significant for music application of mobile computer. With this view, this workshop reflects past experimental music/sound art works as one of the practices that is conscious of the relationship between generativity and environment. This paper depicts the diversity of the way to organize sounds in three works: Steve Reich's Pendulum music, Alvin Lucier's Music on a long thin wire, and Richard Lehman's Travelon Gamelon.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we present our practice of Generative Music Workshop (2010-). This workshop is a series of events that reproduce past masterpieces of generative music. The aim of the workshop is historical re-examination of generative works to contribute to recent musical practices with mobile computing devices. We regard that the organization of sounds with environmental elements would be a significant for music application of mobile computer. With this view, this workshop reflects past experimental music/sound art works as one of the practices that is conscious of the relationship between generativity and environment. 
This paper depicts the diversity of the way to organize sounds in three works: Steve Reich's Pendulum music, Alvin Lucier's Music on a long thin wire, and Richard Lehman's Travelon Gamelon.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we present our practice of Generative Music Workshop (2010-). This workshop is a series of events that reproduce past masterpieces of generative music. The aim of the workshop is historical re-examination of generative works to contribute to recent musical practices with mobile computing devices. We regard that the organization of sounds with environmental elements would be a significant for music application of mobile computer. With this view, this workshop reflects past experimental music/sound art works as one of the practices that is conscious of the relationship between generativity and environment. This paper depicts the diversity of the way to organize sounds in three works: Steve Reich's Pendulum music, Alvin Lucier's Music on a long thin wire, and Richard Lehman's Travelon Gamelon.", "fno": "4546a179", "keywords": [ "Generative Music", "Mobile Computing", "Reproduction", "Experimental Music", "Sound Art" ], "authors": [ { "affiliation": null, "fullName": "Tomotaro Kaneko", "givenName": "Tomotaro", "surname": "Kaneko", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Kazuhiro Jo", "givenName": "Kazuhiro", "surname": "Jo", "__typename": "ArticleAuthorType" } ], "idPrefix": "culture-computing", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-10-01T00:00:00", "pubType": "proceedings", "pages": "179-180", "year": "2011", "issn": null, "isbn": "978-0-7695-4546-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4546a177", "articleId": "12OmNBKEyBU", "__typename": "AdjacentArticleType" }, "next": { "fno": "4546a181", "articleId": 
"12OmNAWH9BL", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icdm/2017/3835/0/3835a817", "title": "Learning to Fuse Music Genres with Generative Adversarial Dual Learning", "doi": null, "abstractUrl": "/proceedings-article/icdm/2017/3835a817/12OmNAR1aTY", "parentPublication": { "id": "proceedings/icdm/2017/3835/0", "title": "2017 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgames/2012/1120/0/S5002", "title": "A framework for interactive generation of music for games", "doi": null, "abstractUrl": "/proceedings-article/cgames/2012/S5002/12OmNBf94Yj", "parentPublication": { "id": "proceedings/cgames/2012/1120/0", "title": "2012 17th International Conference on Computer Games (CGAMES)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2009/3890/0/3890a001", "title": "Swarm Intelligence for Generative Music", "doi": null, "abstractUrl": "/proceedings-article/ism/2009/3890a001/12OmNrAdsxk", "parentPublication": { "id": "proceedings/ism/2009/3890/0", "title": "2009 11th IEEE International Symposium on Multimedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/1997/8262/0/82620499", "title": "A visualization of music", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1997/82620499/12OmNxzMnLE", "parentPublication": { "id": "proceedings/ieee-vis/1997/8262/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ictai/2018/7449/0/744900a786", "title": "Symbolic Music Genre Transfer with CycleGAN", "doi": null, "abstractUrl": "/proceedings-article/ictai/2018/744900a786/17D45VtKiuA", "parentPublication": { "id": "proceedings/ictai/2018/7449/0", "title": "2018 IEEE 30th 
International Conference on Tools with Artificial Intelligence (ICTAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/jcdl/2019/1547/0/154700a453", "title": "Workshop on Requirements, Use Cases, and User Studies in Digital Music Libraries and Archives (RUCUS) 2019: A Half-Day Workshop", "doi": null, "abstractUrl": "/proceedings-article/jcdl/2019/154700a453/1ckrIZhIyNa", "parentPublication": { "id": "proceedings/jcdl/2019/1547/0", "title": "2019 ACM/IEEE Joint Conference on Digital Libraries (JCDL)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigmm/2020/9325/0/09232663", "title": "Unsupervised Generative Adversarial Alignment Representation for Sheet music, Audio and Lyrics", "doi": null, "abstractUrl": "/proceedings-article/bigmm/2020/09232663/1o56D14NON2", "parentPublication": { "id": "proceedings/bigmm/2020/9325/0", "title": "2020 IEEE Sixth International Conference on Multimedia Big Data (BigMM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ctmcd/2021/4856/0/485600a107", "title": "The Application of Computer Music Production Software in Music Creation", "doi": null, "abstractUrl": "/proceedings-article/ctmcd/2021/485600a107/1uOumOU2WTm", "parentPublication": { "id": "proceedings/ctmcd/2021/4856/0", "title": "2021 International Conference on Computer Technology and Media Convergence Design (CTMCD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2021/4065/0/406500a125", "title": "Application of Generative Adversarial Networks and Latent Space Exploration in Music Visualisation", "doi": null, "abstractUrl": "/proceedings-article/cw/2021/406500a125/1yBEZneGLug", "parentPublication": { "id": "proceedings/cw/2021/4065/0", "title": "2021 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/cw/2021/4065/0/406500a137", "title": "Generation of Music With Dynamics Using Deep Convolutional Generative Adversarial Network", "doi": null, "abstractUrl": "/proceedings-article/cw/2021/406500a137/1yBEZuZOQXC", "parentPublication": { "id": "proceedings/cw/2021/4065/0", "title": "2021 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxH9X7I", "title": "2009 International Conference on Measuring Technology and Mechatronics Automation", "acronym": "icmtma", "groupId": "1002837", "volume": "2", "displayVolume": "2", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNz3bdE3", "doi": "10.1109/ICMTMA.2009.663", "title": "Simulation Design and Application of Music Playing Robot Based on SolidWorks", "normalizedTitle": "Simulation Design and Application of Music Playing Robot Based on SolidWorks", "abstract": "In accordance with the requirement of playing music, a music playing robot for performing a piece on a xylophone is developed. The principle and technique of simulation design is introduced. Three dimensions model of the robot is built by SolidWorks software. Parameterized design, virtual assembly, motion simulation and optimization design of the robot have been discussed. Diagrammatic curve of motion analyses is introduced. Displacement curves and velocity curves are given. And its control system is designed. The results indicate that the music playing robot with a single ballet can preferably imitate a hand to play music by beating keys of a xylophone. It has simple structure, agile motion and accurate playing positioning. The simulation modeling and virtual assembly of xylophone, mallet-moving unit, mallet-beating unit and bracket can detect problem on structure in design. Transmission relations, interference check-up and motion simulation ensure to accord preestablished movement project of the robot. Simulation design shortens design period and enhances design efficiency.", "abstracts": [ { "abstractType": "Regular", "content": "In accordance with the requirement of playing music, a music playing robot for performing a piece on a xylophone is developed. The principle and technique of simulation design is introduced. Three dimensions model of the robot is built by SolidWorks software. 
Parameterized design, virtual assembly, motion simulation and optimization design of the robot have been discussed. Diagrammatic curve of motion analyses is introduced. Displacement curves and velocity curves are given. And its control system is designed. The results indicate that the music playing robot with a single ballet can preferably imitate a hand to play music by beating keys of a xylophone. It has simple structure, agile motion and accurate playing positioning. The simulation modeling and virtual assembly of xylophone, mallet-moving unit, mallet-beating unit and bracket can detect problem on structure in design. Transmission relations, interference check-up and motion simulation ensure to accord preestablished movement project of the robot. Simulation design shortens design period and enhances design efficiency.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In accordance with the requirement of playing music, a music playing robot for performing a piece on a xylophone is developed. The principle and technique of simulation design is introduced. Three dimensions model of the robot is built by SolidWorks software. Parameterized design, virtual assembly, motion simulation and optimization design of the robot have been discussed. Diagrammatic curve of motion analyses is introduced. Displacement curves and velocity curves are given. And its control system is designed. The results indicate that the music playing robot with a single ballet can preferably imitate a hand to play music by beating keys of a xylophone. It has simple structure, agile motion and accurate playing positioning. The simulation modeling and virtual assembly of xylophone, mallet-moving unit, mallet-beating unit and bracket can detect problem on structure in design. Transmission relations, interference check-up and motion simulation ensure to accord preestablished movement project of the robot. 
Simulation design shortens design period and enhances design efficiency.", "fno": "3583b339", "keywords": [ "Solid Works", "Music Playing Robot", "Virtual Assembly", "Motion Simulation", "Optimization Design" ], "authors": [ { "affiliation": null, "fullName": "Tingjun Wang", "givenName": "Tingjun", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Shenshun Hu", "givenName": "Shenshun", "surname": "Hu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jun Xu", "givenName": "Jun", "surname": "Xu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Dewei Yan", "givenName": "Dewei", "surname": "Yan", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jiawen Bi", "givenName": "Jiawen", "surname": "Bi", "__typename": "ArticleAuthorType" } ], "idPrefix": "icmtma", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-04-01T00:00:00", "pubType": "proceedings", "pages": "339-342", "year": "2009", "issn": null, "isbn": "978-0-7695-3583-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3583b335", "articleId": "12OmNqGitYu", "__typename": "AdjacentArticleType" }, "next": { "fno": "3583b343", "articleId": "12OmNC8uRto", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cisis/2015/8870/0/8870a064", "title": "Real-Time Music Tracking Based on a Weightless Neural Network", "doi": null, "abstractUrl": "/proceedings-article/cisis/2015/8870a064/12OmNvA1hiV", "parentPublication": { "id": "proceedings/cisis/2015/8870/0", "title": "2015 Ninth International Conference on Complex, Intelligent, and Software Intensive Systems (CISIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bwcca/2011/4532/0/4532a065", "title": "An 
Interactive Music Learning System in Ensemble Performance Class", "doi": null, "abstractUrl": "/proceedings-article/bwcca/2011/4532a065/12OmNwqfsWB", "parentPublication": { "id": "proceedings/bwcca/2011/4532/0", "title": "2011 International Conference on Broadband and Wireless Computing, Communication and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ncm/2009/3769/0/3769b406", "title": "Physical Learning Activities with a Teaching Assistant Robot in Elementary School Music Class", "doi": null, "abstractUrl": "/proceedings-article/ncm/2009/3769b406/12OmNz5JCgd", "parentPublication": { "id": "proceedings/ncm/2009/3769/0", "title": "Networked Computing and Advanced Information Management, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icitcs/2014/6541/0/07021782", "title": "Intuitive Gestural Interfaces / Adaptive Environments and Mobile Devices / Apps.: Playing Music and the Musical Work as a Role Model for Personalized Gestural Interaction in Social Environments", "doi": null, "abstractUrl": "/proceedings-article/icitcs/2014/07021782/12OmNzTH0Rk", "parentPublication": { "id": "proceedings/icitcs/2014/6541/0", "title": "2014 International Conference on IT Convergence and Security (ICITCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/uic-atc/2009/3737/0/3737a377", "title": "A User Location-Aware Music Playing System", "doi": null, "abstractUrl": "/proceedings-article/uic-atc/2009/3737a377/12OmNzXFoMj", "parentPublication": { "id": "proceedings/uic-atc/2009/3737/0", "title": "Ubiquitous, Autonomic and Trusted Computing, Symposia and Workshops on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/devlrn/2005/9226/0/01490970", "title": "It's a Child's Game: Investigating Cognitive Development with Playing Robots", 
"doi": null, "abstractUrl": "/proceedings-article/devlrn/2005/01490970/12OmNzw8j0S", "parentPublication": { "id": "proceedings/devlrn/2005/9226/0", "title": "International Conference on Development and Learning", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/2008/01/mco2008010046", "title": "Toward a Competitive Pool-Playing Robot", "doi": null, "abstractUrl": "/magazine/co/2008/01/mco2008010046/13rRUB6Sq3G", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itme/2018/7744/0/774400a398", "title": "Barcode Music Score", "doi": null, "abstractUrl": "/proceedings-article/itme/2018/774400a398/17D45XDIXPh", "parentPublication": { "id": "proceedings/itme/2018/7744/0", "title": "2018 9th International Conference on Information Technology in Medicine and Education (ITME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icnisc/2022/5351/0/535100a549", "title": "Intelligent Tracked Robot Realizes Simple Detection", "doi": null, "abstractUrl": "/proceedings-article/icnisc/2022/535100a549/1KYtlRVA7yo", "parentPublication": { "id": "proceedings/icnisc/2022/5351/0", "title": "2022 8th Annual International Conference on Network and Information Systems for Computers (ICNISC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/02/09551755", "title": "A Music-Driven Deep Generative Adversarial Model for Guzheng Playing Animation", "doi": null, "abstractUrl": "/journal/tg/2023/02/09551755/1xgx3sOEUXS", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNx7ouU1", "title": "2010 International Conference on Cyberworlds", "acronym": "cw", "groupId": "1000175", "volume": "0", "displayVolume": "0", "year": "2010", "__typename": "ProceedingType" }, "article": { "id": "12OmNzTYC9m", "doi": "10.1109/CW.2010.49", "title": "Computer Animation of Facial Emotions", "normalizedTitle": "Computer Animation of Facial Emotions", "abstract": "Computer facial animation still remains a very challenging topic within the computer graphics community. In this paper, a realistic and expressive computer facial animation system is developed by automated learning from Vicon Nexus facial motion capture data. Facial motion data of different emotions collected using Vicon Nexus are processed using dimensionality reduction techniques such as PCA and EMPCA. EMPCA was found to best preserve the originality of the data the most compared with other techniques. Ultimately, the emotions data are mapped to a 3D animated face, which produced results that clearly show the motion of the eyes, eyebrows, and lips. Our approach used data captured from a real speaker, resulting in more natural and lifelike facial animations. This approach can be used for various applications and serve as prototyping tool to automatically generate realistic and expressive facial animation.", "abstracts": [ { "abstractType": "Regular", "content": "Computer facial animation still remains a very challenging topic within the computer graphics community. In this paper, a realistic and expressive computer facial animation system is developed by automated learning from Vicon Nexus facial motion capture data. Facial motion data of different emotions collected using Vicon Nexus are processed using dimensionality reduction techniques such as PCA and EMPCA. EMPCA was found to best preserve the originality of the data the most compared with other techniques. 
Ultimately, the emotions data are mapped to a 3D animated face, which produced results that clearly show the motion of the eyes, eyebrows, and lips. Our approach used data captured from a real speaker, resulting in more natural and lifelike facial animations. This approach can be used for various applications and serve as prototyping tool to automatically generate realistic and expressive facial animation.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Computer facial animation still remains a very challenging topic within the computer graphics community. In this paper, a realistic and expressive computer facial animation system is developed by automated learning from Vicon Nexus facial motion capture data. Facial motion data of different emotions collected using Vicon Nexus are processed using dimensionality reduction techniques such as PCA and EMPCA. EMPCA was found to best preserve the originality of the data the most compared with other techniques. Ultimately, the emotions data are mapped to a 3D animated face, which produced results that clearly show the motion of the eyes, eyebrows, and lips. Our approach used data captured from a real speaker, resulting in more natural and lifelike facial animations. This approach can be used for various applications and serve as prototyping tool to automatically generate realistic and expressive facial animation.", "fno": "4215a425", "keywords": [ "Computer Facial Animation", "Dimensionality Reduction", "EMPCA", "Data Mapping" ], "authors": [ { "affiliation": null, "fullName": "Choong Seng Chan", "givenName": "Choong Seng", "surname": "Chan", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Flora S. 
Tsai", "givenName": "Flora S.", "surname": "Tsai", "__typename": "ArticleAuthorType" } ], "idPrefix": "cw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2010-10-01T00:00:00", "pubType": "proceedings", "pages": "425-429", "year": "2010", "issn": null, "isbn": "978-0-7695-4215-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4215a421", "articleId": "12OmNxeutcq", "__typename": "AdjacentArticleType" }, "next": { "fno": "4215a430", "articleId": "12OmNyQGSjF", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccsee/2012/4647/3/4647c434", "title": "A Survey of Computer Facial Animation Techniques", "doi": null, "abstractUrl": "/proceedings-article/iccsee/2012/4647c434/12OmNAXxXhU", "parentPublication": { "id": "proceedings/iccsee/2012/4647/3", "title": "Computer Science and Electronics Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2000/0580/0/00840628", "title": "Facial tracking and animation using a 3D sensor", "doi": null, "abstractUrl": "/proceedings-article/fg/2000/00840628/12OmNBlFQZ9", "parentPublication": { "id": "proceedings/fg/2000/0580/0", "title": "Proceedings Fourth IEEE International Conference on Automatic Face and Gesture Recognition (Cat. No. PR00580)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ca/2001/7237/0/00982371", "title": "Realistic 3D facial animation parameters from mirror-reflected multi-view video", "doi": null, "abstractUrl": "/proceedings-article/ca/2001/00982371/12OmNCcKQrq", "parentPublication": { "id": "proceedings/ca/2001/7237/0", "title": "Proceedings Computer Animation 2001. 
Fourteenth Conference on Computer Animation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/kse/2009/3846/0/3846a081", "title": "Fast and Realistic 2D Facial Animation Based on Image Warping", "doi": null, "abstractUrl": "/proceedings-article/kse/2009/3846a081/12OmNqGA59e", "parentPublication": { "id": "proceedings/kse/2009/3846/0", "title": "Knowledge and Systems Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ca/1996/7588/0/75880098", "title": "Facial Animation", "doi": null, "abstractUrl": "/proceedings-article/ca/1996/75880098/12OmNvT2oR2", "parentPublication": { "id": "proceedings/ca/1996/7588/0", "title": "Computer Animation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ca/1996/7588/0/75880068", "title": "Modeling, Tracking and Interactive Animation of Faces and Heads Using Input from Video", "doi": null, "abstractUrl": "/proceedings-article/ca/1996/75880068/12OmNwfKjaJ", "parentPublication": { "id": "proceedings/ca/1996/7588/0", "title": "Computer Animation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2010/04/mcg2010040051", "title": "Modeling Short-Term Dynamics and Variability for Realistic Interactive Facial Animation", "doi": null, "abstractUrl": "/magazine/cg/2010/04/mcg2010040051/13rRUwgQpwW", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2006/06/v1523", "title": "Expressive Facial Animation Synthesis by Learning Speech Coarticulation and Expression Spaces", "doi": null, "abstractUrl": "/journal/tg/2006/06/v1523/13rRUxASubv", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2017/04/mcg2017040030", "title": "Data-Driven Approach to Synthesizing Facial Animation Using Motion Capture", "doi": null, "abstractUrl": "/magazine/cg/2017/04/mcg2017040030/13rRUyeTVkv", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2021/0191/0/019100b973", "title": "DeepFake MNIST+: A DeepFake Facial Animation Dataset", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2021/019100b973/1yNipYT9XSo", "parentPublication": { "id": "proceedings/iccvw/2021/0191/0", "title": "2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNrYlmQE", "title": "Ubiquitous, Autonomic and Trusted Computing, Symposia and Workshops on", "acronym": "uic-atc", "groupId": "1002946", "volume": "0", "displayVolume": "0", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNzXFoMj", "doi": "10.1109/UIC-ATC.2009.57", "title": "A User Location-Aware Music Playing System", "normalizedTitle": "A User Location-Aware Music Playing System", "abstract": "This research is aimed at a smart music playing environment in which a set of speakers are placed in different locations/rooms and one of the speakers will automatically play a music whenever a user gets close to it. Our study is focused on the design and prototype implementation of such a smart music playing system called U-LAMP (User Location-Aware Music Player). It consists of a server that stores streamed music data, and a set of clients that can receive and play the streamed music from the server. A user carries a RFID tag and can move around. When the user’s tag ID is detected by a RFID reader that is connected to a nearby client, the server will switch the transmission of streamed music data from the previous client to the current one. This paper explains in details about how to get a user’s location by detecting the user’s tag ID, manage RTP-based music data delivery from the server to the client, play a streamed music using JMF, and achieve better audio listening effects during the transition period of handing-over the playing music stream between speakers in different rooms.", "abstracts": [ { "abstractType": "Regular", "content": "This research is aimed at a smart music playing environment in which a set of speakers are placed in different locations/rooms and one of the speakers will automatically play a music whenever a user gets close to it. Our study is focused on the design and prototype implementation of such a smart music playing system called U-LAMP (User Location-Aware Music Player). 
It consists of a server that stores streamed music data, and a set of clients that can receive and play the streamed music from the server. A user carries a RFID tag and can move around. When the user’s tag ID is detected by a RFID reader that is connected to a nearby client, the server will switch the transmission of streamed music data from the previous client to the current one. This paper explains in details about how to get a user’s location by detecting the user’s tag ID, manage RTP-based music data delivery from the server to the client, play a streamed music using JMF, and achieve better audio listening effects during the transition period of handing-over the playing music stream between speakers in different rooms.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This research is aimed at a smart music playing environment in which a set of speakers are placed in different locations/rooms and one of the speakers will automatically play a music whenever a user gets close to it. Our study is focused on the design and prototype implementation of such a smart music playing system called U-LAMP (User Location-Aware Music Player). It consists of a server that stores streamed music data, and a set of clients that can receive and play the streamed music from the server. A user carries a RFID tag and can move around. When the user’s tag ID is detected by a RFID reader that is connected to a nearby client, the server will switch the transmission of streamed music data from the previous client to the current one. 
This paper explains in details about how to get a user’s location by detecting the user’s tag ID, manage RTP-based music data delivery from the server to the client, play a streamed music using JMF, and achieve better audio listening effects during the transition period of handing-over the playing music stream between speakers in different rooms.", "fno": "3737a377", "keywords": [ "Ubiquitous", "Location Aware", "Streamed Media", "Music Play", "RFID" ], "authors": [ { "affiliation": null, "fullName": "Youhei Katori", "givenName": "Youhei", "surname": "Katori", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jianhua Ma", "givenName": "Jianhua", "surname": "Ma", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Tomomi Kawashima", "givenName": "Tomomi", "surname": "Kawashima", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Runhe Huang", "givenName": "Runhe", "surname": "Huang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Bernady O. 
Apduhan", "givenName": "Bernady O.", "surname": "Apduhan", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Qun Jin", "givenName": "Qun", "surname": "Jin", "__typename": "ArticleAuthorType" } ], "idPrefix": "uic-atc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-07-01T00:00:00", "pubType": "proceedings", "pages": "377-382", "year": "2009", "issn": null, "isbn": "978-0-7695-3737-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3737a373", "articleId": "12OmNvIfDPm", "__typename": "AdjacentArticleType" }, "next": { "fno": "3737a383", "articleId": "12OmNxRnvRv", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icws/2010/4128/0/4128a465", "title": "User Experience in Added Value Location-Based Mobile Music Service", "doi": null, "abstractUrl": "/proceedings-article/icws/2010/4128a465/12OmNAoDinm", "parentPublication": { "id": "proceedings/icws/2010/4128/0", "title": "2010 IEEE International Conference on Web Services", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2014/4717/0/06890619", "title": "Attaching-music: An interactive music delivery system for private listening as wherever you go", "doi": null, "abstractUrl": "/proceedings-article/icmew/2014/06890619/12OmNCyBXh2", "parentPublication": { "id": "proceedings/icmew/2014/4717/0", "title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bwcca/2011/4532/0/4532a065", "title": "An Interactive Music Learning System in Ensemble Performance Class", "doi": null, "abstractUrl": "/proceedings-article/bwcca/2011/4532a065/12OmNwqfsWB", "parentPublication": { "id": "proceedings/bwcca/2011/4532/0", 
"title": "2011 International Conference on Broadband and Wireless Computing, Communication and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ncm/2008/3322/2/3322b369", "title": "Design of Ubiquitous Music Recommendation System Using MHMM", "doi": null, "abstractUrl": "/proceedings-article/ncm/2008/3322b369/12OmNxwncrM", "parentPublication": { "id": "proceedings/ncm/2008/3322/2", "title": "Networked Computing and Advanced Information Management, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/words/2005/2347/0/23470129", "title": "Distributed Computing Based Streaming and Play of Music Ensemble Realized Through TMO Programming", "doi": null, "abstractUrl": "/proceedings-article/words/2005/23470129/12OmNyNQSQc", "parentPublication": { "id": "proceedings/words/2005/2347/0", "title": "10th IEEE International Workshop on Object-Oriented Real-Time Dependable Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2009/3583/2/3583b339", "title": "Simulation Design and Application of Music Playing Robot Based on SolidWorks", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2009/3583b339/12OmNz3bdE3", "parentPublication": { "id": "proceedings/icmtma/2009/3583/2", "title": "2009 International Conference on Measuring Technology and Mechatronics Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icctd/2009/3892/1/3892a545", "title": "Music Playlist Recommendation Based on User Heartbeat and Music Preference", "doi": null, "abstractUrl": "/proceedings-article/icctd/2009/3892a545/12OmNzDNtnV", "parentPublication": { "id": "proceedings/icctd/2009/3892/1", "title": "2009 International Conference on Computer Technology and Development (ICCTD 2009)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/icitcs/2014/6541/0/07021782", "title": "Intuitive Gestural Interfaces / Adaptive Environments and Mobile Devices / Apps.: Playing Music and the Musical Work as a Role Model for Personalized Gestural Interaction in Social Environments", "doi": null, "abstractUrl": "/proceedings-article/icitcs/2014/07021782/12OmNzTH0Rk", "parentPublication": { "id": "proceedings/icitcs/2014/6541/0", "title": "2014 International Conference on IT Convergence and Security (ICITCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itme/2018/7744/0/774400a398", "title": "Barcode Music Score", "doi": null, "abstractUrl": "/proceedings-article/itme/2018/774400a398/17D45XDIXPh", "parentPublication": { "id": "proceedings/itme/2018/7744/0", "title": "2018 9th International Conference on Information Technology in Medicine and Education (ITME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/02/09551755", "title": "A Music-Driven Deep Generative Adversarial Model for Guzheng Playing Animation", "doi": null, "abstractUrl": "/journal/tg/2023/02/09551755/1xgx3sOEUXS", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1tmhi3ly74c", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "acronym": "icpr", "groupId": "1000545", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1tmjmrPHrJS", "doi": "10.1109/ICPR48806.2021.9413167", "title": "Identity-Preserved Face Beauty Transformation with Conditional Generative Adversarial Networks", "normalizedTitle": "Identity-Preserved Face Beauty Transformation with Conditional Generative Adversarial Networks", "abstract": "Identity-preserved face beauty transformation aims to change the beauty scale of a face image while preserving the identity of the original face. In our framework of conditional Generative Adversarial Networks (cGANs), the synthesized face produced by the generator would have the same beauty scale indicated by the input condition. Unlike the discrete class labels used in most cGANs, the condition of target beauty scale in our framework is given by a continuous real-valued beauty score in the range [1 to 5], which makes the work challenging. To tackle the problem, we have implemented a triple structure, in which the conditional discriminator is divided into a normal discriminator and a separate face beauty predictor. We have also developed another new structure called Conditioned Instance Normalization to replace the original concatenation used in cGANs, which makes the combination of the input image and condition more effective. Furthermore, Self-Consistency Loss is introduced as a new parameter to improve the stability of training and quality of the generated image. In the end, the objectives of beauty transformation and identity preservation are evaluated by the pretrained face beauty predictor and state-of-the-art face recognition network. 
The result is encouraging and it also shows that certain facial features could be synthesized by the generator according to the target beauty scale, while preserving the original identity.", "abstracts": [ { "abstractType": "Regular", "content": "Identity-preserved face beauty transformation aims to change the beauty scale of a face image while preserving the identity of the original face. In our framework of conditional Generative Adversarial Networks (cGANs), the synthesized face produced by the generator would have the same beauty scale indicated by the input condition. Unlike the discrete class labels used in most cGANs, the condition of target beauty scale in our framework is given by a continuous real-valued beauty score in the range [1 to 5], which makes the work challenging. To tackle the problem, we have implemented a triple structure, in which the conditional discriminator is divided into a normal discriminator and a separate face beauty predictor. We have also developed another new structure called Conditioned Instance Normalization to replace the original concatenation used in cGANs, which makes the combination of the input image and condition more effective. Furthermore, Self-Consistency Loss is introduced as a new parameter to improve the stability of training and quality of the generated image. In the end, the objectives of beauty transformation and identity preservation are evaluated by the pretrained face beauty predictor and state-of-the-art face recognition network. The result is encouraging and it also shows that certain facial features could be synthesized by the generator according to the target beauty scale, while preserving the original identity.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Identity-preserved face beauty transformation aims to change the beauty scale of a face image while preserving the identity of the original face. 
In our framework of conditional Generative Adversarial Networks (cGANs), the synthesized face produced by the generator would have the same beauty scale indicated by the input condition. Unlike the discrete class labels used in most cGANs, the condition of target beauty scale in our framework is given by a continuous real-valued beauty score in the range [1 to 5], which makes the work challenging. To tackle the problem, we have implemented a triple structure, in which the conditional discriminator is divided into a normal discriminator and a separate face beauty predictor. We have also developed another new structure called Conditioned Instance Normalization to replace the original concatenation used in cGANs, which makes the combination of the input image and condition more effective. Furthermore, Self-Consistency Loss is introduced as a new parameter to improve the stability of training and quality of the generated image. In the end, the objectives of beauty transformation and identity preservation are evaluated by the pretrained face beauty predictor and state-of-the-art face recognition network. 
The result is encouraging and it also shows that certain facial features could be synthesized by the generator according to the target beauty scale, while preserving the original identity.", "fno": "09413167", "keywords": [ "Face Recognition", "Feature Extraction", "Learning Artificial Intelligence", "Identity Preserved Face Beauty Transformation", "Conditional Generative Adversarial Networks", "Face Image", "Original Face", "C GAN", "Synthesized Face", "Input Condition", "Target Beauty Scale", "Continuous Real Valued Beauty Score", "Conditional Discriminator", "Pretrained Face Beauty Predictor", "Face Recognition Network", "Face Beauty Predictor", "Training", "Face Recognition", "Generative Adversarial Networks", "Generators", "Facial Features", "Face Beauty Transformation", "Identity Preserved", "C GA Ns" ], "authors": [ { "affiliation": "CENPARMI(Centre for Pattern Recognition and MI) Concordia University,Montreal,Canada", "fullName": "Zhitong Huang", "givenName": "Zhitong", "surname": "Huang", "__typename": "ArticleAuthorType" }, { "affiliation": "CENPARMI(Centre for Pattern Recognition and MI) Concordia University,Montreal,Canada", "fullName": "Ching Yee Suen", "givenName": "Ching Yee", "surname": "Suen", "__typename": "ArticleAuthorType" } ], "idPrefix": "icpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-01-01T00:00:00", "pubType": "proceedings", "pages": "7273-7280", "year": "2021", "issn": "1051-4651", "isbn": "978-1-7281-8808-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09413241", "articleId": "1tmidg8zXCU", "__typename": "AdjacentArticleType" }, "next": { "fno": "09412461", "articleId": "1tmiFt2l7s4", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2018/6420/0/642000h939", "title": "Face Aging with Identity-Preserved 
Conditional Generative Adversarial Networks", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000h939/17D45WXIkI6", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000a821", "title": "FaceID-GAN: Learning a Symmetry Three-Player GAN for Identity-Preserving Face Synthesis", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000a821/17D45Xh13pk", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200o4418", "title": "Dual Projection Generative Adversarial Networks for Conditional Image Generation", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200o4418/1BmIOa3DnTq", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956126", "title": "Deep face generation from a rough sketch using multi-level generative adversarial networks", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956126/1IHoDYoZqnK", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2019/2506/0/250600a246", "title": "Understanding Beauty via Deep Facial Features", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2019/250600a246/1iTvokzvgY0", "parentPublication": { "id": "proceedings/cvprw/2019/2506/0", "title": "2019 
IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800f283", "title": "GAN Compression: Efficient Architectures for Interactive Conditional GANs", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800f283/1m3og8x4vra", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800f446", "title": "CIAGAN: Conditional Identity Anonymization Generative Adversarial Networks", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800f446/1m3orbD5kFq", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/04/09241434", "title": "InterFaceGAN: Interpreting the Disentangled Face Representation Learned by GANs", "doi": null, "abstractUrl": "/journal/tp/2022/04/09241434/1ogEwfwfCjC", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2021/8808/0/09412380", "title": "Coherence and Identity Learning for Arbitrary-length Face Video Generation", "doi": null, "abstractUrl": "/proceedings-article/icpr/2021/09412380/1tmiEASmsGQ", "parentPublication": { "id": "proceedings/icpr/2021/8808/0", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/12/09609572", "title": "GAN Compression: Efficient Architectures for Interactive 
Conditional GANs", "doi": null, "abstractUrl": "/journal/tp/2022/12/09609572/1yoxFsTI4vu", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1yBEZe3hqyQ", "title": "2021 International Conference on Cyberworlds (CW)", "acronym": "cw", "groupId": "1000175", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1yBEZuZOQXC", "doi": "10.1109/CW52790.2021.00030", "title": "Generation of Music With Dynamics Using Deep Convolutional Generative Adversarial Network", "normalizedTitle": "Generation of Music With Dynamics Using Deep Convolutional Generative Adversarial Network", "abstract": "Following the rapid advancement of Artificial Intelligence and transition into the era of Big Data, researchers have started to explore the possibility of using machine learning in creative domains such as music generation. However, most research were focused on musical composition and removed expressive attributes during data pre-processing, which resulted in mechanical-sounding generated music. To address this issue, music elements, such as pitch, time and velocity, were extracted from MIDI tracks and encoded with piano-roll data representation. With the piano-roll data representation, Deep Convolutional Generative Adversarial Network (DCGAN) learned the data distribution from the given dataset and generated new data derived from the same distribution. The generated music was evaluated based on its incorporation of music dynamics and a user study. The evaluation results verified that DCGAN could generate expressive music comprising of music dynamics and syncopated rhythm.", "abstracts": [ { "abstractType": "Regular", "content": "Following the rapid advancement of Artificial Intelligence and transition into the era of Big Data, researchers have started to explore the possibility of using machine learning in creative domains such as music generation. However, most research were focused on musical composition and removed expressive attributes during data pre-processing, which resulted in mechanical-sounding generated music. 
To address this issue, music elements, such as pitch, time and velocity, were extracted from MIDI tracks and encoded with piano-roll data representation. With the piano-roll data representation, Deep Convolutional Generative Adversarial Network (DCGAN) learned the data distribution from the given dataset and generated new data derived from the same distribution. The generated music was evaluated based on its incorporation of music dynamics and a user study. The evaluation results verified that DCGAN could generate expressive music comprising of music dynamics and syncopated rhythm.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Following the rapid advancement of Artificial Intelligence and transition into the era of Big Data, researchers have started to explore the possibility of using machine learning in creative domains such as music generation. However, most research were focused on musical composition and removed expressive attributes during data pre-processing, which resulted in mechanical-sounding generated music. To address this issue, music elements, such as pitch, time and velocity, were extracted from MIDI tracks and encoded with piano-roll data representation. With the piano-roll data representation, Deep Convolutional Generative Adversarial Network (DCGAN) learned the data distribution from the given dataset and generated new data derived from the same distribution. The generated music was evaluated based on its incorporation of music dynamics and a user study. 
The evaluation results verified that DCGAN could generate expressive music comprising of music dynamics and syncopated rhythm.", "fno": "406500a137", "keywords": [ "Audio Signal Processing", "Learning Artificial Intelligence", "Music", "Deep Convolutional Generative Adversarial Network", "Big Data", "Music Generation", "Musical Composition", "Data Pre Processing", "Mechanical Sounding Generated Music", "Music Elements", "Piano Roll Data Representation", "Data Distribution", "Music Dynamics", "Expressive Music Comprising", "Training", "Machine Learning", "Big Data", "Generative Adversarial Networks", "Rhythm", "Data Mining", "Music Generation", "DCGAN", "Dynamics", "Pianoroll" ], "authors": [ { "affiliation": "Nanyang Technological University,School of Computer Science and Engineering,Singapore", "fullName": "Raymond Kwan How Toh", "givenName": "Raymond Kwan How", "surname": "Toh", "__typename": "ArticleAuthorType" }, { "affiliation": "Nanyang Technological University,School of Computer Science and Engineering,Singapore", "fullName": "Alexei Sourin", "givenName": "Alexei", "surname": "Sourin", "__typename": "ArticleAuthorType" } ], "idPrefix": "cw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-09-01T00:00:00", "pubType": "proceedings", "pages": "137-140", "year": "2021", "issn": null, "isbn": "978-1-6654-4065-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "406500a133", "articleId": "1yBF6jBHxIY", "__typename": "AdjacentArticleType" }, "next": { "fno": "406500a141", "articleId": "1yBF6HRgLII", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/culture-computing/2011/4546/0/4546a179", "title": "Generative Music Workshop", "doi": null, "abstractUrl": "/proceedings-article/culture-computing/2011/4546a179/12OmNC4eSAC", "parentPublication": { "id": 
"proceedings/culture-computing/2011/4546/0", "title": "International Conference on Culture and Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acpr/2017/3354/0/3354a232", "title": "Font Creation Using Class Discriminative Deep Convolutional Generative Adversarial Networks", "doi": null, "abstractUrl": "/proceedings-article/acpr/2017/3354a232/17D45Xh13v0", "parentPublication": { "id": "proceedings/acpr/2017/3354/0", "title": "2017 4th IAPR Asian Conference on Pattern Recognition (ACPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icbase/2021/2709/0/270900a601", "title": "The comparison between Conditional Generative Adversarial Nets and Deep Convolutional Generative Adversarial Network, and its GUI-related application", "doi": null, "abstractUrl": "/proceedings-article/icbase/2021/270900a601/1AH8fU796co", "parentPublication": { "id": "proceedings/icbase/2021/2709/0", "title": "2021 2nd International Conference on Big Data & Artificial Intelligence & Software Engineering (ICBASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismvl/2022/2395/0/239500a158", "title": "Optimizations of Ternary Generative Adversarial Networks", "doi": null, "abstractUrl": "/proceedings-article/ismvl/2022/239500a158/1Et62kmdiUM", "parentPublication": { "id": "proceedings/ismvl/2022/2395/0", "title": "2022 IEEE 52nd International Symposium on Multiple-Valued Logic (ISMVL)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956322", "title": "Enhancing Population Diversity by Integrating Iterative Local Search with Deep Convolutional Generative Adversarial Networks (GANs)", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956322/1IHoU2bxju8", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International 
Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccst/2020/8138/0/813800a394", "title": "An intelligent music generation based on Variational Autoencoder", "doi": null, "abstractUrl": "/proceedings-article/iccst/2020/813800a394/1p1gpeuOxb2", "parentPublication": { "id": "proceedings/iccst/2020/8138/0", "title": "2020 International Conference on Culture-oriented Science & Technology (ICCST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2021/8808/0/09413119", "title": "Signal Generation using 1d Deep Convolutional Generative Adversarial Networks for Fault Diagnosis of Electrical Machines", "doi": null, "abstractUrl": "/proceedings-article/icpr/2021/09413119/1tmhqhyXdM4", "parentPublication": { "id": "proceedings/icpr/2021/8808/0", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmcce/2020/2314/0/231400b283", "title": "Spoken Keyword Detection Based on Wasserstein Generative Adversarial Network", "doi": null, "abstractUrl": "/proceedings-article/icmcce/2020/231400b283/1tzyV383zd6", "parentPublication": { "id": "proceedings/icmcce/2020/2314/0", "title": "2020 5th International Conference on Mechanical, Control and Computer Engineering (ICMCCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/02/09551755", "title": "A Music-Driven Deep Generative Adversarial Model for Guzheng Playing Animation", "doi": null, "abstractUrl": "/journal/tg/2023/02/09551755/1xgx3sOEUXS", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2021/4065/0/406500a125", "title": "Application of Generative 
Adversarial Networks and Latent Space Exploration in Music Visualisation", "doi": null, "abstractUrl": "/proceedings-article/cw/2021/406500a125/1yBEZneGLug", "parentPublication": { "id": "proceedings/cw/2021/4065/0", "title": "2021 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "13bd1eJgoia", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "13bd1fph1yO", "doi": "10.1109/VR.2018.8446240", "title": "The Relationship Between Visual Attention and Simulator Sickness: A Driving Simulation Study", "normalizedTitle": "The Relationship Between Visual Attention and Simulator Sickness: A Driving Simulation Study", "abstract": "Although visual attention cues are of particular importance for driving simulation tasks, research on the relationship of visual attention and simulator sickness is scarce. This exploratory study is aimed at investigating this relation with a laboratory study in a fixed-based driving simulator (N = 36). No correlation between visual attention and simulator sickness was shown, but the direction of the relation shows a negative tendency.", "abstracts": [ { "abstractType": "Regular", "content": "Although visual attention cues are of particular importance for driving simulation tasks, research on the relationship of visual attention and simulator sickness is scarce. This exploratory study is aimed at investigating this relation with a laboratory study in a fixed-based driving simulator (N = 36). No correlation between visual attention and simulator sickness was shown, but the direction of the relation shows a negative tendency.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Although visual attention cues are of particular importance for driving simulation tasks, research on the relationship of visual attention and simulator sickness is scarce. This exploratory study is aimed at investigating this relation with a laboratory study in a fixed-based driving simulator (N = 36). 
No correlation between visual attention and simulator sickness was shown, but the direction of the relation shows a negative tendency.", "fno": "08446240", "keywords": [ "Behavioural Sciences Computing", "Digital Simulation", "Driver Information Systems", "Visual Attention Cues", "Simulator Sickness", "Driving Simulation Study", "Fixed Based Driving Simulator", "Visualization", "Solid Modeling", "Task Analysis", "Correlation", "Roads", "Electronic Mail", "Pressing", "Human Computer Interaction HCI HCI Design And Evaluation Methods User Studies", "Human Computer Interaction HCI HCI Design And Evaluation Methods Mixed Augmented Reality" ], "authors": [ { "affiliation": "TU llmenau", "fullName": "Anne Hoesch", "givenName": "Anne", "surname": "Hoesch", "__typename": "ArticleAuthorType" }, { "affiliation": "TU llmenau", "fullName": "Sandra Poeschl", "givenName": "Sandra", "surname": "Poeschl", "__typename": "ArticleAuthorType" }, { "affiliation": "TU llmenau", "fullName": "Florian Weidner", "givenName": "Florian", "surname": "Weidner", "__typename": "ArticleAuthorType" }, { "affiliation": "TU llmenau", "fullName": "Roberto Walter", "givenName": "Roberto", "surname": "Walter", "__typename": "ArticleAuthorType" }, { "affiliation": "TU llmenau", "fullName": "Nicola Doering", "givenName": "Nicola", "surname": "Doering", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-03-01T00:00:00", "pubType": "proceedings", "pages": "1-2", "year": "2018", "issn": null, "isbn": "978-1-5386-3365-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08446262", "articleId": "13bd1fZBGcT", "__typename": "AdjacentArticleType" }, "next": { "fno": "08446311", "articleId": "13bd1fdV4l0", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": 
"proceedings/vr/2009/3943/0/04811037", "title": "Real Walking Increases Simulator Sickness in Navigationally Complex Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2009/04811037/12OmNAoDilQ", "parentPublication": { "id": "proceedings/vr/2009/3943/0", "title": "2009 IEEE Virtual Reality Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892307", "title": "Air cushion: A pilot study of the passive technique to mitigate simulator sickness by responding to vection", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892307/12OmNClQ0yz", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2016/3740/0/07836521", "title": "Diminished Reality for Acceleration — Motion Sickness Reduction with Vection for Autonomous Driving", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836521/12OmNyo1nR0", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2011/289/1/05750766", "title": "Virtual Simulation Experiment Analysis of Chevron Deceleration Marking Based on Driving Simulator", "doi": null, "abstractUrl": "/proceedings-article/icicta/2011/05750766/12OmNzdoMrr", "parentPublication": { "id": null, "title": null, "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2022/5725/0/572500a144", "title": "Comparative experiment of attention prompting methods using VR driving simulator", "doi": null, "abstractUrl": "/proceedings-article/aivr/2022/572500a144/1KmFetCHntS", "parentPublication": { "id": "proceedings/aivr/2022/5725/0", "title": "2022 
IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798291", "title": "Assessing Media QoE, Simulator Sickness and Presence for Omnidirectional Videos with Different Test Protocols", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798291/1cJ0GMB2sV2", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798297", "title": "Unifying Research to Address Motion Sickness", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798297/1cJ13JSUePK", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/11/08798880", "title": "Sick Moves! 
Motion Parameters as Indicators of Simulator Sickness", "doi": null, "abstractUrl": "/journal/tg/2019/11/08798880/1cumZbd4qNG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090670", "title": "SiSiMo: Towards Simulator Sickness Modeling for 360<sup>&#x00B0;</sup> Videos Viewed with an HMD", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090670/1jIxwAw9Z9C", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/02/09551731", "title": "Learning from Deep Stereoscopic Attention for Simulator Sickness Prediction", "doi": null, "abstractUrl": "/journal/tg/2023/02/09551731/1xgx3DIeexq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1tmhi3ly74c", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "acronym": "icpr", "groupId": "1000545", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1tmiMP82mre", "doi": "10.1109/ICPR48806.2021.9412423", "title": "VR Sickness Assessment with Perception Prior and Hybrid Temporal Features", "normalizedTitle": "VR Sickness Assessment with Perception Prior and Hybrid Temporal Features", "abstract": "Virtual reality (VR) sickness is one of the obstacles hindering the growth of the VR market. Different VR contents may cause various degree of sickness. If the degree of the sickness can be estimated objectively, it adds a great value and help in designing the VR contents. To address this problem, a novel content-based VR sickness assessment method which considers both the perception prior and hybrid temporal features is proposed. Based on the perception prior which assumes the user's field of view becomes narrower while watching videos, a Gaussian weighted optical flow is calculated with a specified aspect ratio. In order to capture the dynamic characteristics, hybrid temporal features including horizontal motion, vertical motion and the proposed motion anisotropy are adopted. In addition, a new dataset is compiled with one hundred VR sickness test samples and each of which comes along with the Discomfort Scores (DS) answered by the user and a Simulator Sickness Questionnaire (SSQ) collected at the end of test. A random forest regressor is then trained on this dataset by feeding the hybrid temporal features of both the present and the previous minute. 
Extensive experiments are conducted on the VRSA dataset and the results demonstrate that the proposed method is comparable to the state-of-the-art method in terms of effectiveness and efficiency.", "abstracts": [ { "abstractType": "Regular", "content": "Virtual reality (VR) sickness is one of the obstacles hindering the growth of the VR market. Different VR contents may cause various degree of sickness. If the degree of the sickness can be estimated objectively, it adds a great value and help in designing the VR contents. To address this problem, a novel content-based VR sickness assessment method which considers both the perception prior and hybrid temporal features is proposed. Based on the perception prior which assumes the user's field of view becomes narrower while watching videos, a Gaussian weighted optical flow is calculated with a specified aspect ratio. In order to capture the dynamic characteristics, hybrid temporal features including horizontal motion, vertical motion and the proposed motion anisotropy are adopted. In addition, a new dataset is compiled with one hundred VR sickness test samples and each of which comes along with the Discomfort Scores (DS) answered by the user and a Simulator Sickness Questionnaire (SSQ) collected at the end of test. A random forest regressor is then trained on this dataset by feeding the hybrid temporal features of both the present and the previous minute. Extensive experiments are conducted on the VRSA dataset and the results demonstrate that the proposed method is comparable to the state-of-the-art method in terms of effectiveness and efficiency.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Virtual reality (VR) sickness is one of the obstacles hindering the growth of the VR market. Different VR contents may cause various degree of sickness. If the degree of the sickness can be estimated objectively, it adds a great value and help in designing the VR contents. 
To address this problem, a novel content-based VR sickness assessment method which considers both the perception prior and hybrid temporal features is proposed. Based on the perception prior which assumes the user's field of view becomes narrower while watching videos, a Gaussian weighted optical flow is calculated with a specified aspect ratio. In order to capture the dynamic characteristics, hybrid temporal features including horizontal motion, vertical motion and the proposed motion anisotropy are adopted. In addition, a new dataset is compiled with one hundred VR sickness test samples and each of which comes along with the Discomfort Scores (DS) answered by the user and a Simulator Sickness Questionnaire (SSQ) collected at the end of test. A random forest regressor is then trained on this dataset by feeding the hybrid temporal features of both the present and the previous minute. Extensive experiments are conducted on the VRSA dataset and the results demonstrate that the proposed method is comparable to the state-of-the-art method in terms of effectiveness and efficiency.", "fno": "09412423", "keywords": [ "Ergonomics", "Image Sequences", "Learning Artificial Intelligence", "Regression Analysis", "Virtual Reality", "Virtual Reality Sickness", "VR Market", "Different VR Contents", "Novel Content Based VR Sickness Assessment", "Perception Prior", "Hybrid Temporal Features", "VR Sickness Test Samples", "Simulator Sickness Questionnaire", "Solid Modeling", "Dynamics", "Virtual Reality", "Forestry", "Predictive Models", "Feature Extraction", "Pattern Recognition", "VR Sickness Assessment", "Perception Prior", "Random Forest" ], "authors": [ { "affiliation": "The Graduate Institute of Networking and Multimedia, National Taiwan University,Department of Computer Science and Information Engineering,Taipei,Taiwan", "fullName": "Po-Chen Kuo", "givenName": "Po-Chen", "surname": "Kuo", "__typename": "ArticleAuthorType" }, { "affiliation": "The Graduate Institute of 
Networking and Multimedia, National Taiwan University,Department of Computer Science and Information Engineering,Taipei,Taiwan", "fullName": "Li-Chung Chuang", "givenName": "Li-Chung", "surname": "Chuang", "__typename": "ArticleAuthorType" }, { "affiliation": "The Graduate Institute of Networking and Multimedia, National Taiwan University,Department of Computer Science and Information Engineering,Taipei,Taiwan", "fullName": "Dong-Yi Lin", "givenName": "Dong-Yi", "surname": "Lin", "__typename": "ArticleAuthorType" }, { "affiliation": "The Graduate Institute of Networking and Multimedia, National Taiwan University,Department of Computer Science and Information Engineering,Taipei,Taiwan", "fullName": "Ming-Sui Lee", "givenName": "Ming-Sui", "surname": "Lee", "__typename": "ArticleAuthorType" } ], "idPrefix": "icpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-01-01T00:00:00", "pubType": "proceedings", "pages": "5558-5564", "year": "2021", "issn": "1051-4651", "isbn": "978-1-7281-8808-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09412023", "articleId": "1tmk2rEZAQw", "__typename": "AdjacentArticleType" }, "next": { "fno": "09412513", "articleId": "1tmjdxrrwNG", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2018/3365/0/08446346", "title": "Reducing VR Sickness Through Peripheral Visual Effects", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446346/13bd1fHrlRY", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/mu/2022/02/09779506", "title": "Why VR Games Sickness? 
An Empirical Study of Capturing and Analyzing VR Games Head Movement Dataset", "doi": null, "abstractUrl": "/magazine/mu/2022/02/09779506/1DwUBBXPkVG", "parentPublication": { "id": "mags/mu", "title": "IEEE MultiMedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cost/2022/6248/0/624800a169", "title": "Development of VR Motion Sickness Test Platform Based on UE", "doi": null, "abstractUrl": "/proceedings-article/cost/2022/624800a169/1H2pqPKjkAg", "parentPublication": { "id": "proceedings/cost/2022/6248/0", "title": "2022 International Conference on Culture-Oriented Science and Technology (CoST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a094", "title": "An EEG-based Experiment on VR Sickness and Postural Instability While Walking in Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a094/1MNgWtYsR5S", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798136", "title": "VR Sickness in Continuous Exposure to Live-action 180&#x00B0;Video", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798136/1cJ1gPJX2og", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/11/08798880", "title": "Sick Moves! 
Motion Parameters as Indicators of Simulator Sickness", "doi": null, "abstractUrl": "/journal/tg/2019/11/08798880/1cumZbd4qNG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089437", "title": "The Effect of a Foveated Field-of-view Restrictor on VR Sickness", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089437/1jIxcfT0Wt2", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a735", "title": "[DC] Towards Universal VR Sickness Mitigation Strategies", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a735/1tnXDI2lhHq", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a426", "title": "Is Virtual Reality Sickness Elicited by Illusory Motion Affected by Gender and Prior Video Gaming Experience?", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a426/1tnXYDa4Wcg", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a380", "title": "Evaluating VR Sickness in VR Locomotion Techniques", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a380/1tnXc1raaxq", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D 
User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyugyQo", "title": "2014 IEEE Winter Conference on Applications of Computer Vision (WACV)", "acronym": "wacv", "groupId": "1000040", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNCwCLld", "doi": "10.1109/WACV.2014.6836066", "title": "Viewpoint-independent book spine segmentation", "normalizedTitle": "Viewpoint-independent book spine segmentation", "abstract": "We propose a method to precisely segment books on bookshelves in images taken from general viewpoints. The proposed segmentation algorithm overcomes difficulties due to text and texture on book spines, various book orientations under perspective projection, and book proximity. A shape dependent active contour is used as a first step to establish a set of book spine candidates. A subset of these candidates are selected using spatial constraints on the assembly of spine candidates by formulating the selection problem as the maximal weighted independent set (MWIS) of a graph. The segmented book spines may be used by recognition systems (e.g., library automation), or rendered in computer graphics applications. We also propose a novel application that uses the segmented book spines to assist users in bookshelf reorganization or to modify the image to create a bookshelf with a tidier look. Our method was successfully tested on challenging sets of images.", "abstracts": [ { "abstractType": "Regular", "content": "We propose a method to precisely segment books on bookshelves in images taken from general viewpoints. The proposed segmentation algorithm overcomes difficulties due to text and texture on book spines, various book orientations under perspective projection, and book proximity. A shape dependent active contour is used as a first step to establish a set of book spine candidates. 
A subset of these candidates are selected using spatial constraints on the assembly of spine candidates by formulating the selection problem as the maximal weighted independent set (MWIS) of a graph. The segmented book spines may be used by recognition systems (e.g., library automation), or rendered in computer graphics applications. We also propose a novel application that uses the segmented book spines to assist users in bookshelf reorganization or to modify the image to create a bookshelf with a tidier look. Our method was successfully tested on challenging sets of images.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose a method to precisely segment books on bookshelves in images taken from general viewpoints. The proposed segmentation algorithm overcomes difficulties due to text and texture on book spines, various book orientations under perspective projection, and book proximity. A shape dependent active contour is used as a first step to establish a set of book spine candidates. A subset of these candidates are selected using spatial constraints on the assembly of spine candidates by formulating the selection problem as the maximal weighted independent set (MWIS) of a graph. The segmented book spines may be used by recognition systems (e.g., library automation), or rendered in computer graphics applications. We also propose a novel application that uses the segmented book spines to assist users in bookshelf reorganization or to modify the image to create a bookshelf with a tidier look. 
Our method was successfully tested on challenging sets of images.", "fno": "06836066", "keywords": [ "Image Segmentation", "Image Edge Detection", "Shape", "Active Contours", "Three Dimensional Displays", "Optical Character Recognition Software", "Libraries" ], "authors": [ { "affiliation": "The Interdisciplinary Center, Herzliya 46150, Israel", "fullName": "Lior Talker", "givenName": "Lior", "surname": "Talker", "__typename": "ArticleAuthorType" }, { "affiliation": "The Interdisciplinary Center, Herzliya 46150, Israel", "fullName": "Yael Moses", "givenName": "Yael", "surname": "Moses", "__typename": "ArticleAuthorType" } ], "idPrefix": "wacv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-03-01T00:00:00", "pubType": "proceedings", "pages": "453-460", "year": "2014", "issn": null, "isbn": "978-1-4799-4985-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06836065", "articleId": "12OmNx5YvcO", "__typename": "AdjacentArticleType" }, "next": { "fno": "06836067", "articleId": "12OmNylKASg", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icme/2011/348/0/06012171", "title": "Mobile augmented reality for books on a shelf", "doi": null, "abstractUrl": "/proceedings-article/icme/2011/06012171/12OmNqFJhFV", "parentPublication": { "id": "proceedings/icme/2011/348/0", "title": "2011 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/2013/4999/0/06628812", "title": "An Active Contour Model for Speech Balloon Detection in Comics", "doi": null, "abstractUrl": "/proceedings-article/icdar/2013/06628812/12OmNwkzumt", "parentPublication": { "id": "proceedings/icdar/2013/4999/0", "title": "2013 12th International Conference on Document Analysis and Recognition 
(ICDAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/1988/0878/0/00028399", "title": "The processing and recognition of X-rays of spine tumor", "doi": null, "abstractUrl": "/proceedings-article/icpr/1988/00028399/12OmNy7QfmR", "parentPublication": { "id": "proceedings/icpr/1988/0878/0", "title": "9th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv-motion/2005/2271/1/227110218", "title": "The Smart Bookshelf: A Study of Camera Projector Scene Augmentation of an Everyday Environment", "doi": null, "abstractUrl": "/proceedings-article/wacv-motion/2005/227110218/12OmNyO8tTK", "parentPublication": { "id": "proceedings/wacv-motion/2005/2271/1", "title": "Applications of Computer Vision and the IEEE Workshop on Motion and Video Computing, IEEE Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbms/2000/0484/0/04840255", "title": "Use of Shape Models to Search Digitized Spine X-rays", "doi": null, "abstractUrl": "/proceedings-article/cbms/2000/04840255/12OmNzRHOU2", "parentPublication": { "id": "proceedings/cbms/2000/0484/0", "title": "Proceedings 13th IEEE Symposium on Computer-Based Medical Systems. 
CBMS 2000", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2012/12/ttp2012122467", "title": "Whole-Book Recognition", "doi": null, "abstractUrl": "/journal/tp/2012/12/ttp2012122467/13rRUxAAT2q", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aemcse/2022/8474/0/847400a696", "title": "Multi-perspectives 2D Spine CT images Segmentation of 3D Fuse Algorithm", "doi": null, "abstractUrl": "/proceedings-article/aemcse/2022/847400a696/1IlOgb601Fe", "parentPublication": { "id": "proceedings/aemcse/2022/8474/0", "title": "2022 5th International Conference on Advanced Electronic Materials, Computers and Software Engineering (AEMCSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2019/4941/0/08933795", "title": "Interactive Dendritic Spine Analysis Based on 3D Morphological Features", "doi": null, "abstractUrl": "/proceedings-article/vis/2019/08933795/1fTgFVHfgPe", "parentPublication": { "id": "proceedings/vis/2019/4941/0", "title": "2019 IEEE Visualization Conference (VIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csci/2018/1360/0/136000a386", "title": "Recognizing Call Numbers for Library Books: The Problem and Issues", "doi": null, "abstractUrl": "/proceedings-article/csci/2018/136000a386/1gjRz9MfyEw", "parentPublication": { "id": "proceedings/csci/2018/1360/0", "title": "2018 International Conference on Computational Science and Computational Intelligence (CSCI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciap/1999/0040/0/00797713", "title": "Extraction of bibliography information based on image of book cover", "doi": null, "abstractUrl": 
"/proceedings-article/iciap/1999/00797713/1h0FVwaJ2gw", "parentPublication": { "id": "proceedings/iciap/1999/0040/0", "title": "Image Analysis and Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNx4Q6EK", "title": "2015 Seventh International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)", "acronym": "icmtma", "groupId": "1002837", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNrHSD3n", "doi": "10.1109/ICMTMA.2015.96", "title": "Detection and Evaluation on the Spine Tracking Accuracy during Image-Guided Radiation Therapy", "normalizedTitle": "Detection and Evaluation on the Spine Tracking Accuracy during Image-Guided Radiation Therapy", "abstract": "Utilize the head and neck phantom, lung phantom and simulated human phantom to detect the spine tracking accuracy of Cyber Knife during IGRT, and give evaluation. Use CT to scan the head and neck phantom, lung phantom and simulated human phantom which have films set in, then make use of the treatment planning system to design plans for tracking cervical vertebra, thoracic vertebra, lumbar vertebra, caudal vertebra and thigh bone. Cyber Knife carries out the plans for phantoms, and E2E software is used to analyze the irradiating accuracy. Results The accuracy of tracking cervical vertebra was 0.54 mm and 0.68 mm, thoracic vertebra, 0.50 mm and 0.65 mm, lumbar vertebra, 0.70 mm, sacral vertebrae, 1.99 mm, ilium, 2.21 mm, thigh bone, 2.05 mm. For these 3 static phantoms, the accuracy of tracking cervical vertebra, thoracic vertebra and lumbar vertebra all reached the sub millimeter level, and the tracking accuracy of sacral vertebrae, ilium and thigh bone was 2±0.21 mm. So the spine tracking accuracy is quite good.", "abstracts": [ { "abstractType": "Regular", "content": "Utilize the head and neck phantom, lung phantom and simulated human phantom to detect the spine tracking accuracy of Cyber Knife during IGRT, and give evaluation. 
Use CT to scan the head and neck phantom, lung phantom and simulated human phantom which have films set in, then make use of the treatment planning system to design plans for tracking cervical vertebra, thoracic vertebra, lumbar vertebra, caudal vertebra and thigh bone. Cyber Knife carries out the plans for phantoms, and E2E software is used to analyze the irradiating accuracy. Results The accuracy of tracking cervical vertebra was 0.54 mm and 0.68 mm, thoracic vertebra, 0.50 mm and 0.65 mm, lumbar vertebra, 0.70 mm, sacral vertebrae, 1.99 mm, ilium, 2.21 mm, thigh bone, 2.05 mm. For these 3 static phantoms, the accuracy of tracking cervical vertebra, thoracic vertebra and lumbar vertebra all reached the sub millimeter level, and the tracking accuracy of sacral vertebrae, ilium and thigh bone was 2±0.21 mm. So the spine tracking accuracy is quite good.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Utilize the head and neck phantom, lung phantom and simulated human phantom to detect the spine tracking accuracy of Cyber Knife during IGRT, and give evaluation. Use CT to scan the head and neck phantom, lung phantom and simulated human phantom which have films set in, then make use of the treatment planning system to design plans for tracking cervical vertebra, thoracic vertebra, lumbar vertebra, caudal vertebra and thigh bone. Cyber Knife carries out the plans for phantoms, and E2E software is used to analyze the irradiating accuracy. Results The accuracy of tracking cervical vertebra was 0.54 mm and 0.68 mm, thoracic vertebra, 0.50 mm and 0.65 mm, lumbar vertebra, 0.70 mm, sacral vertebrae, 1.99 mm, ilium, 2.21 mm, thigh bone, 2.05 mm. For these 3 static phantoms, the accuracy of tracking cervical vertebra, thoracic vertebra and lumbar vertebra all reached the sub millimeter level, and the tracking accuracy of sacral vertebrae, ilium and thigh bone was 2±0.21 mm. 
So the spine tracking accuracy is quite good.", "fno": "7143a374", "keywords": [ "Phantoms", "Accuracy", "Bones", "Tumors", "Head", "Lungs", "Neck", "Accuracy", "Cyber Knife", "IGRT", "Spine Tracking" ], "authors": [ { "affiliation": null, "fullName": "Li Yu", "givenName": "Li", "surname": "Yu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Xu Hui-Jun", "givenName": "Xu", "surname": "Hui-Jun", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Zhang Su-Jing", "givenName": "Zhang", "surname": "Su-Jing", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yang Xiao", "givenName": "Yang", "surname": "Xiao", "__typename": "ArticleAuthorType" } ], "idPrefix": "icmtma", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-06-01T00:00:00", "pubType": "proceedings", "pages": "374-376", "year": "2015", "issn": null, "isbn": "978-1-4673-7143-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "7143a370", "articleId": "12OmNzxgHtJ", "__typename": "AdjacentArticleType" }, "next": { "fno": "7143a377", "articleId": "12OmNrGKeun", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cbmsys/1990/9040/0/00109445", "title": "High resolution anthropomorphic phantom for Monte Carlo analysis of internal radiation sources", "doi": null, "abstractUrl": "/proceedings-article/cbmsys/1990/00109445/12OmNCvumPq", "parentPublication": { "id": "proceedings/cbmsys/1990/9040/0", "title": "1990 Proceedings Third Annual IEEE Symposium on Computer-Based Medical Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2011/0063/0/06130383", "title": "Multi-modal surface registration for markerless initial patient setup in radiation therapy using microsoft's Kinect sensor", 
"doi": null, "abstractUrl": "/proceedings-article/iccvw/2011/06130383/12OmNyL0TwP", "parentPublication": { "id": "proceedings/iccvw/2011/0063/0", "title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2014/5669/0/06999228", "title": "Automatic and fast registration method for image-guided surgery", "doi": null, "abstractUrl": "/proceedings-article/bibm/2014/06999228/12OmNzA6GHh", "parentPublication": { "id": "proceedings/bibm/2014/5669/0", "title": "2014 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/case/2012/0430/0/06386488", "title": "An improved rigidity penalty for deformable registration of head and neck images in intensity-modulated radiation therapy", "doi": null, "abstractUrl": "/proceedings-article/case/2012/06386488/12OmNzBOhMH", "parentPublication": { "id": "proceedings/case/2012/0430/0", "title": "2012 IEEE International Conference on Automation Science and Engineering (CASE 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }