data
dict
{ "proceeding": { "id": "12OmNCbCrVT", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNvrdHZZ", "doi": "10.1109/CVPR.2014.499", "title": "Bayesian View Synthesis and Image-Based Rendering Principles", "normalizedTitle": "Bayesian View Synthesis and Image-Based Rendering Principles", "abstract": "In this paper, we address the problem of synthesizing novel views from a set of input images. State of the art methods, such as the Unstructured Lumigraph, have been using heuristics to combine information from the original views, often using an explicit or implicit approximation of the scene geometry. While the proposed heuristics have been largely explored and proven to work effectively, a Bayesian formulation was recently introduced, formalizing some of the previously proposed heuristics, pointing out which physical phenomena could lie behind each. However, some important heuristics were still not taken into account and lack proper formalization. We contribute a new physics-based generative model and the corresponding Maximum a Posteriori estimate, providing the desired unification between heuristics-based methods and a Bayesian formulation. The key point is to systematically consider the error induced by the uncertainty in the geometric proxy. We provide an extensive discussion, analyzing how the obtained equations explain the heuristics developed in previous methods. Furthermore, we show that our novel Bayesian model significantly improves the quality of novel views, in particular if the scene geometry estimate is inaccurate.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we address the problem of synthesizing novel views from a set of input images. 
State of the art methods, such as the Unstructured Lumigraph, have been using heuristics to combine information from the original views, often using an explicit or implicit approximation of the scene geometry. While the proposed heuristics have been largely explored and proven to work effectively, a Bayesian formulation was recently introduced, formalizing some of the previously proposed heuristics, pointing out which physical phenomena could lie behind each. However, some important heuristics were still not taken into account and lack proper formalization. We contribute a new physics-based generative model and the corresponding Maximum a Posteriori estimate, providing the desired unification between heuristics-based methods and a Bayesian formulation. The key point is to systematically consider the error induced by the uncertainty in the geometric proxy. We provide an extensive discussion, analyzing how the obtained equations explain the heuristics developed in previous methods. Furthermore, we show that our novel Bayesian model significantly improves the quality of novel views, in particular if the scene geometry estimate is inaccurate.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we address the problem of synthesizing novel views from a set of input images. State of the art methods, such as the Unstructured Lumigraph, have been using heuristics to combine information from the original views, often using an explicit or implicit approximation of the scene geometry. While the proposed heuristics have been largely explored and proven to work effectively, a Bayesian formulation was recently introduced, formalizing some of the previously proposed heuristics, pointing out which physical phenomena could lie behind each. However, some important heuristics were still not taken into account and lack proper formalization. 
We contribute a new physics-based generative model and the corresponding Maximum a Posteriori estimate, providing the desired unification between heuristics-based methods and a Bayesian formulation. The key point is to systematically consider the error induced by the uncertainty in the geometric proxy. We provide an extensive discussion, analyzing how the obtained equations explain the heuristics developed in previous methods. Furthermore, we show that our novel Bayesian model significantly improves the quality of novel views, in particular if the scene geometry estimate is inaccurate.", "fno": "5118d906", "keywords": [ "Cameras", "Image Resolution", "Bayes Methods", "Geometry", "Uncertainty", "Rendering Computer Graphics", "Optical Imaging", "Bayesian Framework", "Image Based Rendering", "Depth Uncertainty", "Generative Model" ], "authors": [ { "affiliation": null, "fullName": "Sergi Pujades", "givenName": "Sergi", "surname": "Pujades", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Frederic Devernay", "givenName": "Frederic", "surname": "Devernay", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Bastian Goldluecke", "givenName": "Bastian", "surname": "Goldluecke", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-06-01T00:00:00", "pubType": "proceedings", "pages": "3906-3913", "year": "2014", "issn": "1063-6919", "isbn": "978-1-4799-5118-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "5118d898", "articleId": "12OmNA0MZ6e", "__typename": "AdjacentArticleType" }, "next": { "fno": "5118d914", "articleId": "12OmNxR5US6", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/dcc/2000/0592/0/05920253", "title": "Compression of Lumigraph with Multiple 
Reference Frame (MRF) Prediction and Just-in-Time Rendering", "doi": null, "abstractUrl": "/proceedings-article/dcc/2000/05920253/12OmNscfI0f", "parentPublication": { "id": "proceedings/dcc/2000/0592/0", "title": "Data Compression Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2008/3358/0/3358a270", "title": "A Parallel Multi-view Rendering Architecture", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2008/3358a270/12OmNwxlrdu", "parentPublication": { "id": "proceedings/sibgrapi/2008/3358/0", "title": "2008 XXI Brazilian Symposium on Computer Graphics and Image Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2014/4761/0/06890281", "title": "High resolution free-view interpolation of planar structure", "doi": null, "abstractUrl": "/proceedings-article/icme/2014/06890281/12OmNyL0TqG", "parentPublication": { "id": "proceedings/icme/2014/4761/0", "title": "2014 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2015/8332/0/8332a469", "title": "A Bayesian Approach for Selective Image-Based Rendering Using Superpixels", "doi": null, "abstractUrl": "/proceedings-article/3dv/2015/8332a469/12OmNzR8Cwl", "parentPublication": { "id": "proceedings/3dv/2015/8332/0", "title": "2015 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/01532833", "title": "View selection for volume rendering", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/01532833/12OmNzmLxRe", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tg/2013/10/ttg2013101619", "title": "A Spherical Gaussian Framework for Bayesian Monte Carlo Rendering of Glossy Surfaces", "doi": null, "abstractUrl": "/journal/tg/2013/10/ttg2013101619/13rRUwbs20W", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200m2138", "title": "Bayesian Triplet Loss: Uncertainty Quantification in Image Retrieval", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200m2138/1BmIN4kBgJ2", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/11/08794622", "title": "Real-Time View Planning for Unstructured Lumigraph Modeling", "doi": null, "abstractUrl": "/journal/tg/2019/11/08794622/1dNHkZwvGik", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/09008541", "title": "Monocular Neural Image Based Rendering With Continuous View Control", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/09008541/1hVlbVEAL3a", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800b679", "title": "Multi-View Neural Human Rendering", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800b679/1m3nrZeJhvy", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1ezRzLyH4bu", "title": "2019 International Conference on 3D Vision (3DV)", "acronym": "3dv", "groupId": "1800494", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1ezRB4XSC0E", "doi": "10.1109/3DV.2019.00065", "title": "Learned Multi-View Texture Super-Resolution", "normalizedTitle": "Learned Multi-View Texture Super-Resolution", "abstract": "We present a super-resolution method capable of creating a high-resolution texture map for a virtual 3D object from a set of lower-resolution images of that object. Our architecture unifies the concepts of (i) multi-view super-resolution based on the redundancy of overlapping views and (ii) single-view super-resolution based on a learned prior of high-resolution (HR) image structure. The principle of multi-view super-resolution is to invert the image formation process and recover the latent HR texture from multiple lower-resolution projections. We map that inverse problem into a block of suitably designed neural network layers, and combine it with a standard encoder-decoder network for learned single-image super-resolution. Wiring the image formation model into the network avoids having to learn perspective mapping from textures to images, and elegantly handles a varying number of input views. Experiments demonstrate that the combination of multi-view observations and learned prior yields improved texture maps.", "abstracts": [ { "abstractType": "Regular", "content": "We present a super-resolution method capable of creating a high-resolution texture map for a virtual 3D object from a set of lower-resolution images of that object. Our architecture unifies the concepts of (i) multi-view super-resolution based on the redundancy of overlapping views and (ii) single-view super-resolution based on a learned prior of high-resolution (HR) image structure. 
The principle of multi-view super-resolution is to invert the image formation process and recover the latent HR texture from multiple lower-resolution projections. We map that inverse problem into a block of suitably designed neural network layers, and combine it with a standard encoder-decoder network for learned single-image super-resolution. Wiring the image formation model into the network avoids having to learn perspective mapping from textures to images, and elegantly handles a varying number of input views. Experiments demonstrate that the combination of multi-view observations and learned prior yields improved texture maps.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a super-resolution method capable of creating a high-resolution texture map for a virtual 3D object from a set of lower-resolution images of that object. Our architecture unifies the concepts of (i) multi-view super-resolution based on the redundancy of overlapping views and (ii) single-view super-resolution based on a learned prior of high-resolution (HR) image structure. The principle of multi-view super-resolution is to invert the image formation process and recover the latent HR texture from multiple lower-resolution projections. We map that inverse problem into a block of suitably designed neural network layers, and combine it with a standard encoder-decoder network for learned single-image super-resolution. Wiring the image formation model into the network avoids having to learn perspective mapping from textures to images, and elegantly handles a varying number of input views. 
Experiments demonstrate that the combination of multi-view observations and learned prior yields improved texture maps.", "fno": "313100a533", "keywords": [ "Image Reconstruction", "Image Resolution", "Image Texture", "Learning Artificial Intelligence", "Learned Multiview Texture Super Resolution", "Super Resolution Method", "High Resolution Texture Map", "Lower Resolution Images", "Single View Super Resolution", "High Resolution Image Structure", "Multiview Super Resolution", "Image Formation Process", "Latent HR Texture", "Multiple Lower Resolution Projections", "Suitably Designed Neural Network Layers", "Learned Single Image Super Resolution", "Image Formation Model", "Input Views", "Multiview Observations", "Texture Maps", "Three Dimensional Displays", "Computational Modeling", "Surface Reconstruction", "Geometry", "Image Reconstruction", "Texture", "Multi View", "Super Resolution", "Learning", "Neural Network", "Variational Methods" ], "authors": [ { "affiliation": "ETH Zurich", "fullName": "Audrey Richard", "givenName": "Audrey", "surname": "Richard", "__typename": "ArticleAuthorType" }, { "affiliation": "ETH Zurich", "fullName": "Ian Cherabier", "givenName": "Ian", "surname": "Cherabier", "__typename": "ArticleAuthorType" }, { "affiliation": "ETH Zurich", "fullName": "Martin R. 
Oswald", "givenName": "Martin R.", "surname": "Oswald", "__typename": "ArticleAuthorType" }, { "affiliation": "IBM Research Zurich", "fullName": "Vagia Tsiminaki", "givenName": "Vagia", "surname": "Tsiminaki", "__typename": "ArticleAuthorType" }, { "affiliation": "ETH Zurich, Microsoft Zurich", "fullName": "Marc Pollefeys", "givenName": "Marc", "surname": "Pollefeys", "__typename": "ArticleAuthorType" }, { "affiliation": "ETH Zurich", "fullName": "Konrad Schindler", "givenName": "Konrad", "surname": "Schindler", "__typename": "ArticleAuthorType" } ], "idPrefix": "3dv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-09-01T00:00:00", "pubType": "proceedings", "pages": "533-543", "year": "2019", "issn": null, "isbn": "978-1-7281-3131-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "313100a524", "articleId": "1ezREa0LWOQ", "__typename": "AdjacentArticleType" }, "next": { "fno": "313100a544", "articleId": "1ezRBWorrXy", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2014/5118/0/5118b502", "title": "High Resolution 3D Shape Texture from Multiple Videos", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2014/5118b502/12OmNAS9zy5", "parentPublication": { "id": "proceedings/cvpr/2014/5118/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cisp/2008/3119/2/3119b145", "title": "Super Resolution of 3D Surface Texture Based on Eigen Images", "doi": null, "abstractUrl": "/proceedings-article/cisp/2008/3119b145/12OmNBC8AAT", "parentPublication": { "id": "proceedings/cisp/2008/3119/3", "title": "Image and Signal Processing, Congress on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { 
"id": "proceedings/rvsp/2013/3184/0/3184a287", "title": "An Efficient Super Resolution Algorithm Using Simple Linear Regression", "doi": null, "abstractUrl": "/proceedings-article/rvsp/2013/3184a287/12OmNvkGW1g", "parentPublication": { "id": "proceedings/rvsp/2013/3184/0", "title": "2013 Second International Conference on Robot, Vision and Signal Processing (RVSP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109b820", "title": "Super-Resolution Texture Mapping from Multiple View Images", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109b820/12OmNwFzNZF", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2014/4717/0/06890540", "title": "Super resolution for multiview mixed resolution images in transform-domain", "doi": null, "abstractUrl": "/proceedings-article/icmew/2014/06890540/12OmNweBUQm", "parentPublication": { "id": "proceedings/icmew/2014/4717/0", "title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/06977433", "title": "Super-resolution Reconstruction for Binocular 3D Data", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/06977433/12OmNyvoXhN", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600m2693", "title": "MNSRNet: Multimodal Transformer Network for 3D Surface Super-Resolution", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600m2693/1H1iQb6SJVK", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", 
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300h974", "title": "Image Super-Resolution by Neural Texture Transfer", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300h974/1gyrHCNnxHW", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800f790", "title": "Learning Texture Transformer Network for Image Super-Resolution", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800f790/1m3nxZc4396", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2021/4899/0/489900b791", "title": "Super-Resolution Appearance Transfer for 4D Human Performances", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2021/489900b791/1yXsHyUi520", "parentPublication": { "id": "proceedings/cvprw/2021/4899/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBOllga", "title": "Augmented Reality, International Symposium on", "acronym": "isar", "groupId": "1000063", "volume": "0", "displayVolume": "0", "year": "2000", "__typename": "ProceedingType" }, "article": { "id": "12OmNx0RIKx", "doi": "10.1109/ISAR.2000.10003", "title": "Development of the Varioscope AR. A see-through HMD for computer-aided surgery", "normalizedTitle": "Development of the Varioscope AR. A see-through HMD for computer-aided surgery", "abstract": "In computer-aided surgery (CAS), an undesired side-effect of the necessity of handling sophisticated equipment in the operating room is the fact that the surgeon's attention is drawn from the operating field, since surgical progress is partially monitored on the computer's screen. Augmented reality (AR), the overlay of computer-generated graphics over a real-world scene, provides a possibility to solve this problem. The technical problems associated with this approach, such as viewing of the scenery within a common focal range on the head-mounted display (HMD) or latency in the display on the HMD, have, however, kept AR from widespread usage in CAS. The concept of the Varioscope AR, a lightweight head-mounted operating microscope used as a HMD, is introduced. The registration of the patient to the pre-operative image data, as well as pre-operative planning, take place on VISIT, a surgical navigation system developed at our hospital. Tracking of the HMD and stereoscopic visualisation take place on a separate POSIX.4 compliant real-time operating system running on PC hardware. We were able to overcome the technical problems described above; our work resulted in an AR visualisation system with an update rate of 6 Hz and a latency below 130 ms. It integrates seamlessly into a surgical navigation system and provides a common focus for both virtual and real-world objects. 
First evaluations of the photogrammetric 2D/3D registration have resulted in a match of 1.7 pixels on the HMD display. The Varioscope AR with its real-time visualisation unit is a major step towards the introduction of AR into clinical routine.", "abstracts": [ { "abstractType": "Regular", "content": "In computer-aided surgery (CAS), an undesired side-effect of the necessity of handling sophisticated equipment in the operating room is the fact that the surgeon's attention is drawn from the operating field, since surgical progress is partially monitored on the computer's screen. Augmented reality (AR), the overlay of computer-generated graphics over a real-world scene, provides a possibility to solve this problem. The technical problems associated with this approach, such as viewing of the scenery within a common focal range on the head-mounted display (HMD) or latency in the display on the HMD, have, however, kept AR from widespread usage in CAS. The concept of the Varioscope AR, a lightweight head-mounted operating microscope used as a HMD, is introduced. The registration of the patient to the pre-operative image data, as well as pre-operative planning, take place on VISIT, a surgical navigation system developed at our hospital. Tracking of the HMD and stereoscopic visualisation take place on a separate POSIX.4 compliant real-time operating system running on PC hardware. We were able to overcome the technical problems described above; our work resulted in an AR visualisation system with an update rate of 6 Hz and a latency below 130 ms. It integrates seamlessly into a surgical navigation system and provides a common focus for both virtual and real-world objects. First evaluations of the photogrammetric 2D/3D registration have resulted in a match of 1.7 pixels on the HMD display. 
The Varioscope AR with its real-time visualisation unit is a major step towards the introduction of AR into clinical routine.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In computer-aided surgery (CAS), an undesired side-effect of the necessity of handling sophisticated equipment in the operating room is the fact that the surgeon's attention is drawn from the operating field, since surgical progress is partially monitored on the computer's screen. Augmented reality (AR), the overlay of computer-generated graphics over a real-world scene, provides a possibility to solve this problem. The technical problems associated with this approach, such as viewing of the scenery within a common focal range on the head-mounted display (HMD) or latency in the display on the HMD, have, however, kept AR from widespread usage in CAS. The concept of the Varioscope AR, a lightweight head-mounted operating microscope used as a HMD, is introduced. The registration of the patient to the pre-operative image data, as well as pre-operative planning, take place on VISIT, a surgical navigation system developed at our hospital. Tracking of the HMD and stereoscopic visualisation take place on a separate POSIX.4 compliant real-time operating system running on PC hardware. We were able to overcome the technical problems described above; our work resulted in an AR visualisation system with an update rate of 6 Hz and a latency below 130 ms. It integrates seamlessly into a surgical navigation system and provides a common focus for both virtual and real-world objects. First evaluations of the photogrammetric 2D/3D registration have resulted in a match of 1.7 pixels on the HMD display. 
The Varioscope AR with its real-time visualisation unit is a major step towards the introduction of AR into clinical routine.", "fno": "08460054", "keywords": [], "authors": [ { "affiliation": "University of Vienna General Hospital, Vienna, Austria", "fullName": "Wolfgang Birkfellner", "givenName": "Wolfgang", "surname": "Birkfellner", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Vienna General Hospital, Vienna, Austria", "fullName": "Klaus Huber", "givenName": "Klaus", "surname": "Huber", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Vienna General Hospital, Vienna, Austria", "fullName": "Franz Watzinger", "givenName": "Franz", "surname": "Watzinger", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Vienna General Hospital, Vienna, Austria", "fullName": "Michael Figl", "givenName": "Michael", "surname": "Figl", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Vienna General Hospital, Vienna, Austria", "fullName": "Felix Wanschitz", "givenName": "Felix", "surname": "Wanschitz", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Vienna General Hospital, Vienna, Austria", "fullName": "Rudolf Hanel", "givenName": "Rudolf", "surname": "Hanel", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Vienna General Hospital, Vienna, Austria", "fullName": "Dietmar Rafolt", "givenName": "Dietmar", "surname": "Rafolt", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Vienna General Hospital, Vienna, Austria", "fullName": "Rolf Ewers", "givenName": "Rolf", "surname": "Ewers", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Vienna General Hospital, Vienna, Austria", "fullName": "Helmar Bergmann", "givenName": "Helmar", "surname": "Bergmann", "__typename": "ArticleAuthorType" } ], "idPrefix": "isar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": 
"2000-10-01T00:00:00", "pubType": "proceedings", "pages": "54", "year": "2000", "issn": null, "isbn": "0-7695-0846-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08460047", "articleId": "12OmNAtst8P", "__typename": "AdjacentArticleType" }, "next": { "fno": "08460060", "articleId": "12OmNzDNtuA", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2011/0039/0/05759459", "title": "AR aided implant templating for unilateral fracture reduction and internal fixation surgery", "doi": null, "abstractUrl": "/proceedings-article/vr/2011/05759459/12OmNCcKQDV", "parentPublication": { "id": "proceedings/vr/2011/0039/0", "title": "2011 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isar/2001/1375/0/13750020", "title": "Current Status of the Varioscope AR, a Head-Mounted Operating Microscope for Computer-Aided Surgery", "doi": null, "abstractUrl": "/proceedings-article/isar/2001/13750020/12OmNqNos7r", "parentPublication": { "id": "proceedings/isar/2001/1375/0", "title": "Proceedings IEEE and ACM International Symposium on Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isar/2000/0846/0/08460068", "title": "Development of a stereo video see-through HMD for AR systems", "doi": null, "abstractUrl": "/proceedings-article/isar/2000/08460068/12OmNwNeYAd", "parentPublication": { "id": "proceedings/isar/2000/0846/0", "title": "Augmented Reality, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2013/2869/0/06671837", "title": "Using a HHD with a HMD for mobile AR interaction", "doi": null, "abstractUrl": "/proceedings-article/ismar/2013/06671837/12OmNyFCvQq", "parentPublication": { "id": 
"proceedings/ismar/2013/2869/0", "title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2013/2869/0/06671786", "title": "Interaction techniques for HMD-HHD hybrid AR systems", "doi": null, "abstractUrl": "/proceedings-article/ismar/2013/06671786/12OmNyxFKaD", "parentPublication": { "id": "proceedings/ismar/2013/2869/0", "title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a437", "title": "AR HMD for Remote Instruction in Healthcare", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a437/1CJd7HBOPPW", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a812", "title": "AR-HMD Multitask Viewing System Concept with a Supporting Handheld Viewport for Multiple Spatially-Anchored Workspaces", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a812/1J7WvwZew9O", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/10077744", "title": "A Comparative Evaluation of Optical See-through Augmented Reality in Surgical Guidance", "doi": null, "abstractUrl": "/journal/tg/5555/01/10077744/1LH8EZ3NEGI", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tg/2019/11/08794561", "title": "AR HMD Guidance for Controlled Hand-Held 3D Acquisition", "doi": null, "abstractUrl": "/journal/tg/2019/11/08794561/1dNHoWNm3GE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a526", "title": "Text Selection in AR-HMD Using a Smartphone as an Input Device", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a526/1tnXhwEI6RO", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAYoKmw", "title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2013", "__typename": "ProceedingType" }, "article": { "id": "12OmNyxFKaD", "doi": "10.1109/ISMAR.2013.6671786", "title": "Interaction techniques for HMD-HHD hybrid AR systems", "normalizedTitle": "Interaction techniques for HMD-HHD hybrid AR systems", "abstract": "Most mobile Augmented Reality (AR) systems use either a head mounted display (HMD) or a handheld display (HHD) as a hardware platform. As mobile devices become more affordable, it becomes more common that users own more than one mobile device and use them together. In this research, we investigate Hybrid AR systems that use both HMD and HHD for AR visualization and interaction. In addition to a simple approach of using HMD as a display and HHD as an input device (e.g. a touch pad or a pointer), we further explore novel interaction techniques that can take advantage of having both HMD and HHD closely integrated into one AR system, such as cross-device information sharing, situation adaptive visualization management, and multi-layered visualization.", "abstracts": [ { "abstractType": "Regular", "content": "Most mobile Augmented Reality (AR) systems use either a head mounted display (HMD) or a handheld display (HHD) as a hardware platform. As mobile devices become more affordable, it becomes more common that users own more than one mobile device and use them together. In this research, we investigate Hybrid AR systems that use both HMD and HHD for AR visualization and interaction. In addition to a simple approach of using HMD as a display and HHD as an input device (e.g. 
a touch pad or a pointer), we further explore novel interaction techniques that can take advantage of having both HMD and HHD closely integrated into one AR system, such as cross-device information sharing, situation adaptive visualization management, and multi-layered visualization.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Most mobile Augmented Reality (AR) systems use either a head mounted display (HMD) or a handheld display (HHD) as a hardware platform. As mobile devices become more affordable, it becomes more common that users own more than one mobile device and use them together. In this research, we investigate Hybrid AR systems that use both HMD and HHD for AR visualization and interaction. In addition to a simple approach of using HMD as a display and HHD as an input device (e.g. a touch pad or a pointer), we further explore novel interaction techniques that can take advantage of having both HMD and HHD closely integrated into one AR system, such as cross-device information sharing, situation adaptive visualization management, and multi-layered visualization.", "fno": "06671786", "keywords": [ "Visualization", "Augmented Reality", "Mobile Communication", "Three Dimensional Displays", "Mobile Handsets", "Wearable Computers", "Hardware", "Handheld Display", "Mobile Augmented Reality", "Wearable Computer", "Headmounted Display" ], "authors": [ { "affiliation": "Human Interface Technol. Lab. New Zealand, Univ. of Canterbury, Christchurch, New Zealand", "fullName": "Rahul Budhiraja", "givenName": "Rahul", "surname": "Budhiraja", "__typename": "ArticleAuthorType" }, { "affiliation": "Human Interface Technol. Lab. New Zealand, Univ. of Canterbury, Christchurch, New Zealand", "fullName": "Gun A. Lee", "givenName": "Gun A.", "surname": "Lee", "__typename": "ArticleAuthorType" }, { "affiliation": "Human Interface Technol. Lab. New Zealand, Univ. 
of Canterbury, Christchurch, New Zealand", "fullName": "Mark Billinghurst", "givenName": "Mark", "surname": "Billinghurst", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2013-10-01T00:00:00", "pubType": "proceedings", "pages": "243-244", "year": "2013", "issn": null, "isbn": "978-1-4799-2869-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06671785", "articleId": "12OmNwwuDZl", "__typename": "AdjacentArticleType" }, "next": { "fno": "06671787", "articleId": "12OmNznkK6h", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2015/1727/0/07223368", "title": "Dynamic hierarchical virtual button-based hand interaction for wearable AR", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223368/12OmNAMbZFA", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2013/2869/0/06671823", "title": "Ego- and Exocentric interaction for mobile AR conferencing", "doi": null, "abstractUrl": "/proceedings-article/ismar/2013/06671823/12OmNASraUi", "parentPublication": { "id": "proceedings/ismar/2013/2869/0", "title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-amh/2010/9339/0/05643296", "title": "An integrated design flow in user interface and interaction for enhancing mobile AR gaming experiences", "doi": null, "abstractUrl": "/proceedings-article/ismar-amh/2010/05643296/12OmNBE7Moa", "parentPublication": { "id": "proceedings/ismar-amh/2010/9339/0", "title": "2010 IEEE International Symposium on Mixed and Augmented 
Reality - Arts, Media, and Humanities", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2013/2869/0/06671837", "title": "Using a HHD with a HMD for mobile AR interaction", "doi": null, "abstractUrl": "/proceedings-article/ismar/2013/06671837/12OmNyFCvQq", "parentPublication": { "id": "proceedings/ismar/2013/2869/0", "title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2016/0842/0/07460065", "title": "Smartwatch-assisted robust 6-DOF hand tracker for object manipulation in HMD-based augmented reality", "doi": null, "abstractUrl": "/proceedings-article/3dui/2016/07460065/12OmNzlUKES", "parentPublication": { "id": "proceedings/3dui/2016/0842/0", "title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rtcsa/2018/7759/0/775900a209", "title": "Exploring Augmented Reality Interaction for Everyday Multipurpose Wearable Robots", "doi": null, "abstractUrl": "/proceedings-article/rtcsa/2018/775900a209/17D45WaTkd9", "parentPublication": { "id": "proceedings/rtcsa/2018/7759/0", "title": "2018 IEEE 24th International Conference on Embedded and Real-Time Computing Systems and Applications (RTCSA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dcabes/2018/7445/0/744500a076", "title": "Design of Spot Introduction and User Interaction System Based on AR Augmented Reality Technology", "doi": null, "abstractUrl": "/proceedings-article/dcabes/2018/744500a076/17D45XwUAGV", "parentPublication": { "id": "proceedings/dcabes/2018/7445/0", "title": "2018 17th International Symposium on Distributed Computing and Applications for Business Engineering and Science (DCABES)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a812", "title": "AR-HMD Multitask Viewing System Concept with a Supporting Handheld Viewport for Multiple Spatially-Anchored Workspaces", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a812/1J7WvwZew9O", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a526", "title": "Text Selection in AR-HMD Using a Smartphone as an Input Device", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a526/1tnXhwEI6RO", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800a324", "title": "Towards In-situ Authoring of AR Visualizations with Mobile Devices", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a324/1yeQJrGq6WI", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "13bd1eJgoia", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "13bd1eOELLz", "doi": "10.1109/VR.2018.8446560", "title": "An AR-Guided System for Fast Image-Based Modeling of Indoor Scenes", "normalizedTitle": "An AR-Guided System for Fast Image-Based Modeling of Indoor Scenes", "abstract": "We present a system that enables a novice user to acquire a large indoor scene in minutes as a collection of images that are sufficient for five degrees-of-freedom virtual navigation by image morphing. The user walks through the scene wearing an augmented reality head-mounted display (AR HMD) enhanced with a panoramic video camera. The AR HMD visualizes a 2D grid partitioning of a dynamically generated floor plan, which guides the user to acquire a panorama from each grid cell. The panoramas are registered offline using both AR HMD tracking data and structure-from - motion tools. Feature correspondences are established between neighboring panoramas. The resulting panoramas and correspondences support interactive rendering via image morphing with any view direction and from any viewpoint on the acquisition plane.", "abstracts": [ { "abstractType": "Regular", "content": "We present a system that enables a novice user to acquire a large indoor scene in minutes as a collection of images that are sufficient for five degrees-of-freedom virtual navigation by image morphing. The user walks through the scene wearing an augmented reality head-mounted display (AR HMD) enhanced with a panoramic video camera. The AR HMD visualizes a 2D grid partitioning of a dynamically generated floor plan, which guides the user to acquire a panorama from each grid cell. The panoramas are registered offline using both AR HMD tracking data and structure-from - motion tools. 
Feature correspondences are established between neighboring panoramas. The resulting panoramas and correspondences support interactive rendering via image morphing with any view direction and from any viewpoint on the acquisition plane.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a system that enables a novice user to acquire a large indoor scene in minutes as a collection of images that are sufficient for five degrees-of-freedom virtual navigation by image morphing. The user walks through the scene wearing an augmented reality head-mounted display (AR HMD) enhanced with a panoramic video camera. The AR HMD visualizes a 2D grid partitioning of a dynamically generated floor plan, which guides the user to acquire a panorama from each grid cell. The panoramas are registered offline using both AR HMD tracking data and structure-from - motion tools. Feature correspondences are established between neighboring panoramas. The resulting panoramas and correspondences support interactive rendering via image morphing with any view direction and from any viewpoint on the acquisition plane.", "fno": "08446560", "keywords": [ "Augmented Reality", "Helmet Mounted Displays", "Image Morphing", "Rendering Computer Graphics", "Video Cameras", "2 D Grid Partitioning", "Floor Plan", "Grid Cell", "Image Morphing", "Fast Image Based Modeling", "Indoor Scene", "Novice User", "Degrees Of Freedom Virtual Navigation", "Augmented Reality Head Mounted Display", "Panoramic Video Camera", "AR Guided System", "Interactive Rendering", "Acquisition Plane", "AR HMD Tracking Data", "Cameras", "Resists", "Two Dimensional Displays", "Three Dimensional Displays", "Visualization", "Solid Modeling", "Navigation", "Human Centered Computing Mixed Augmented Reality", "Computing Methodologies Virtual Reality", "Computing Methodologies Image Based Rendering" ], "authors": [ { "affiliation": "Purdue University", "fullName": "Daniel Andersen", "givenName": "Daniel", "surname": 
"Andersen", "__typename": "ArticleAuthorType" }, { "affiliation": "Purdue University", "fullName": "Voicu Popescu", "givenName": "Voicu", "surname": "Popescu", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-03-01T00:00:00", "pubType": "proceedings", "pages": "501-502", "year": "2018", "issn": null, "isbn": "978-1-5386-3365-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08446481", "articleId": "13bd1AITnaR", "__typename": "AdjacentArticleType" }, "next": { "fno": "08446569", "articleId": "13bd1ftOBCI", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2013/2869/0/06671786", "title": "Interaction techniques for HMD-HHD hybrid AR systems", "doi": null, "abstractUrl": "/proceedings-article/ismar/2013/06671786/12OmNyxFKaD", "parentPublication": { "id": "proceedings/ismar/2013/2869/0", "title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699284", "title": "[Poster] Using an Industry-Ready AR HMD on a Real Maintenance Task: AR Benefits Performance on Certain Task Steps More Than Others", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699284/19F1VfaPMhW", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a437", "title": "AR HMD for Remote Instruction in Healthcare", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a437/1CJd7HBOPPW", "parentPublication": { "id": 
"proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a812", "title": "AR-HMD Multitask Viewing System Concept with a Supporting Handheld Viewport for Multiple Spatially-Anchored Workspaces", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a812/1J7WvwZew9O", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a378", "title": "Enhancing the Reading Experience on AR HMDs by Using Smartphones as Assistive Displays", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a378/1MNgGafxH4Q", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/11/08794561", "title": "AR HMD Guidance for Controlled Hand-Held 3D Acquisition", "doi": null, "abstractUrl": "/journal/tg/2019/11/08794561/1dNHoWNm3GE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2020/7675/0/767500a096", "title": "Stepping over Obstacles with Augmented Reality based on Visual Exproprioception", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a096/1pBMiFPYlkA", "parentPublication": { "id": "proceedings/ismar-adjunct/2020/7675/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a611", "title": "User study of an AR reading aid system to promote deep reading", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a611/1tnWMj0AXn2", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a446", "title": "AREarthQuakeDrill: Toward Increased Awareness of Personnel during Earthquakes via AR Evacuation Drills", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a446/1tnWN99G6TS", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2021/3225/0/322500a237", "title": "A Large-Scale Indoor Layout Reconstruction and Localization System for Spatial-Aware Mobile AR Applications", "doi": null, "abstractUrl": "/proceedings-article/aivr/2021/322500a237/1zxLwu0YJqw", "parentPublication": { "id": "proceedings/aivr/2021/3225/0", "title": "2021 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "19F1LC52tjO", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "1810084", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "19F1VfaPMhW", "doi": "10.1109/ISMAR-Adjunct.2018.00075", "title": "[Poster] Using an Industry-Ready AR HMD on a Real Maintenance Task: AR Benefits Performance on Certain Task Steps More Than Others", "normalizedTitle": "[Poster] Using an Industry-Ready AR HMD on a Real Maintenance Task: AR Benefits Performance on Certain Task Steps More Than Others", "abstract": "This paper presents a novel evaluation of an industry-ready HMD for delivering AR work instructions in a real-life, industrial procedure for novice users. A user study was performed to examine the potential benefits and limitations of a dynamic 3D virtual model and AR text instructions, delivered through an optical see through HMD, for training users in a new industry procedure (i.e., Yaw Motor Servicing of a wind turbine). Measures of task accuracy and completion time were used to evaluate the performance of one group of mechanical engineering students performing this procedure for the first time guided by AR compared to a second group performing it using a tablet-delivered instruction manual. Results showed AR improved accuracy but not speed of task completion. AR significantly increased accuracy on one specific task-step in the procedure, namely measurement of a thin air gap (see figure 1, left panel), but also showed limitations with other task-steps not benefitting or even being slowed down by AR (see figure 1, right panel). 
Findings speak to the importance of incorporating an analysis at the level of individual task steps in order to fully evaluate AR work instructions.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents a novel evaluation of an industry-ready HMD for delivering AR work instructions in a real-life, industrial procedure for novice users. A user study was performed to examine the potential benefits and limitations of a dynamic 3D virtual model and AR text instructions, delivered through an optical see through HMD, for training users in a new industry procedure (i.e., Yaw Motor Servicing of a wind turbine). Measures of task accuracy and completion time were used to evaluate the performance of one group of mechanical engineering students performing this procedure for the first time guided by AR compared to a second group performing it using a tablet-delivered instruction manual. Results showed AR improved accuracy but not speed of task completion. AR significantly increased accuracy on one specific task-step in the procedure, namely measurement of a thin air gap (see figure 1, left panel), but also showed limitations with other task-steps not benefitting or even being slowed down by AR (see figure 1, right panel). Findings speak to the importance of incorporating an analysis at the level of individual task steps in order to fully evaluate AR work instructions.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents a novel evaluation of an industry-ready HMD for delivering AR work instructions in a real-life, industrial procedure for novice users. A user study was performed to examine the potential benefits and limitations of a dynamic 3D virtual model and AR text instructions, delivered through an optical see through HMD, for training users in a new industry procedure (i.e., Yaw Motor Servicing of a wind turbine). 
Measures of task accuracy and completion time were used to evaluate the performance of one group of mechanical engineering students performing this procedure for the first time guided by AR compared to a second group performing it using a tablet-delivered instruction manual. Results showed AR improved accuracy but not speed of task completion. AR significantly increased accuracy on one specific task-step in the procedure, namely measurement of a thin air gap (see figure 1, left panel), but also showed limitations with other task-steps not benefitting or even being slowed down by AR (see figure 1, right panel). Findings speak to the importance of incorporating an analysis at the level of individual task steps in order to fully evaluate AR work instructions.", "fno": "08699284", "keywords": [ "Helmet Mounted Displays", "Human Computer Interaction", "Human Factors", "Maintenance Engineering", "Mechanical Engineering Computing", "Virtual Reality", "Wind Turbines", "Completion Time", "Mechanical Engineering Students", "Tablet Delivered Instruction Manual", "AR Improved Accuracy", "Task Completion", "Task Steps", "Individual Task Steps", "AR Work Instructions", "Maintenance Task", "AR Benefits Performance", "Industry Ready HMD", "Industrial Procedure", "Novice Users", "User Study", "Training Users", "Industry Procedure", "Wind Turbine", "Task Accuracy", "Yaw Motor Servicing", "Task Analysis", "Resists", "Three Dimensional Displays", "Industries", "Maintenance Engineering", "Solid Modeling", "Manuals", "Augmented Reality", "Providing Instructions", "Maintenance", "Workpiece", "Head Mounted Displays", "H 5 2 Information Interfaces And Presentation Multimedia Information Systems Artificial Augmented And Virtual Realities", "H 5 2 Information Interfaces And Presentation User Interfaces Training Help And Documentation" ], "authors": [ { "affiliation": "Trinity College Dublin", "fullName": "Andrew Princle", "givenName": "Andrew", "surname": "Princle", "__typename": 
"ArticleAuthorType" }, { "affiliation": "University College Dublin", "fullName": "Abraham G. Campbell", "givenName": "Abraham G.", "surname": "Campbell", "__typename": "ArticleAuthorType" }, { "affiliation": "DAQRI", "fullName": "Stefanie Hutka", "givenName": "Stefanie", "surname": "Hutka", "__typename": "ArticleAuthorType" }, { "affiliation": "DAQRI", "fullName": "Alberto Torrasso", "givenName": "Alberto", "surname": "Torrasso", "__typename": "ArticleAuthorType" }, { "affiliation": "DAQRI", "fullName": "Colin Couper", "givenName": "Colin", "surname": "Couper", "__typename": "ArticleAuthorType" }, { "affiliation": "DAQRI", "fullName": "Fabian Strunden", "givenName": "Fabian", "surname": "Strunden", "__typename": "ArticleAuthorType" }, { "affiliation": "DAQRI", "fullName": "Jan Bajana", "givenName": "Jan", "surname": "Bajana", "__typename": "ArticleAuthorType" }, { "affiliation": "DAQRI", "fullName": "Kamil Jastząb", "givenName": "Kamil", "surname": "Jastząb", "__typename": "ArticleAuthorType" }, { "affiliation": "DAQRI", "fullName": "Ralph Croly", "givenName": "Ralph", "surname": "Croly", "__typename": "ArticleAuthorType" }, { "affiliation": "DAQRI", "fullName": "Rob Quigley", "givenName": "Rob", "surname": "Quigley", "__typename": "ArticleAuthorType" }, { "affiliation": "DAQRI", "fullName": "Ross McKiernan", "givenName": "Ross", "surname": "McKiernan", "__typename": "ArticleAuthorType" }, { "affiliation": "DAQRI", "fullName": "Paul Sweeney", "givenName": "Paul", "surname": "Sweeney", "__typename": "ArticleAuthorType" }, { "affiliation": "University College Dublin", "fullName": "Mark T. 
Keane", "givenName": "Mark T.", "surname": "Keane", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-10-01T00:00:00", "pubType": "proceedings", "pages": "236-241", "year": "2018", "issn": null, "isbn": "978-1-5386-7592-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08699205", "articleId": "19F1ODVNNkY", "__typename": "AdjacentArticleType" }, "next": { "fno": "08699283", "articleId": "19F1V9Ax9Be", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvprw/2017/0733/0/0733b244", "title": "Object State Recognition for Automatic AR-Based Maintenance Guidance", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2017/0733b244/12OmNAGw13Z", "parentPublication": { "id": "proceedings/cvprw/2017/0733/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2014/6184/0/06948404", "title": "AR-mentor: Augmented reality based mentoring system", "doi": null, "abstractUrl": "/proceedings-article/ismar/2014/06948404/12OmNvnOwuE", "parentPublication": { "id": "proceedings/ismar/2014/6184/0", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2016/3740/0/07836522", "title": "Exploring Immersive AR Instructions for Procedural Tasks: The Role of Depth, Motion, and Volumetric Representations", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836522/12OmNxETajV", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality 
(ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2017/6327/0/6327a052", "title": "[POSTER] Hybrid Video/Optical See-Through HMD", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a052/12OmNy4r3Ph", "parentPublication": { "id": "proceedings/ismar-adjunct/2017/6327/0", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2018/7459/0/745900a134", "title": "Comparing HMD-Based and Paper-Based Training", "doi": null, "abstractUrl": "/proceedings-article/ismar/2018/745900a134/17D45WrVgdI", "parentPublication": { "id": "proceedings/ismar/2018/7459/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a437", "title": "AR HMD for Remote Instruction in Healthcare", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a437/1CJd7HBOPPW", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a460", "title": "Selection Techniques for 3D Extended Desktop Workstation with AR HMD", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a460/1JrR6BnYp6U", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/11/08794561", "title": "AR HMD Guidance for Controlled Hand-Held 3D Acquisition", "doi": null, "abstractUrl": 
"/journal/tg/2019/11/08794561/1dNHoWNm3GE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a034", "title": "AR Tips: Augmented First-Person View Task Instruction Videos", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a034/1gysm0mzZlK", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2020/7463/0/746300a174", "title": "An AR Work Instructions Authoring Tool for Human-Operated Industrial Assembly Lines", "doi": null, "abstractUrl": "/proceedings-article/aivr/2020/746300a174/1qpzDvRJytG", "parentPublication": { "id": "proceedings/aivr/2020/7463/0", "title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1CJcAaH6aYg", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "acronym": "vrw", "groupId": "1836626", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1CJd7HBOPPW", "doi": "10.1109/VRW55335.2022.00096", "title": "AR HMD for Remote Instruction in Healthcare", "normalizedTitle": "AR HMD for Remote Instruction in Healthcare", "abstract": "In the following position paper we introduce the use of AR HMD for remote instruction in healthcare and present the challenges our team has faced in achieving this application in two contexts: surgical telementoring and paramedic teleconsulting. After the presentation of how these challenges come to be and indications on how to address them, we argue that those who wish to pursue this area of research must be grounded in best practices from the field of CSCW integrated with technical innovations in AR interaction development. This is a truly interdisciplinary research and development area that has many challenging topics to tackle through collaborative efforts.", "abstracts": [ { "abstractType": "Regular", "content": "In the following position paper we introduce the use of AR HMD for remote instruction in healthcare and present the challenges our team has faced in achieving this application in two contexts: surgical telementoring and paramedic teleconsulting. After the presentation of how these challenges come to be and indications on how to address them, we argue that those who wish to pursue this area of research must be grounded in best practices from the field of CSCW integrated with technical innovations in AR interaction development. 
This is a truly interdisciplinary research and development area that has many challenging topics to tackle through collaborative efforts.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In the following position paper we introduce the use of AR HMD for remote instruction in healthcare and present the challenges our team has faced in achieving this application in two contexts: surgical telementoring and paramedic teleconsulting. After the presentation of how these challenges come to be and indications on how to address them, we argue that those who wish to pursue this area of research must be grounded in best practices from the field of CSCW integrated with technical innovations in AR interaction development. This is a truly interdisciplinary research and development area that has many challenging topics to tackle through collaborative efforts.", "fno": "840200a437", "keywords": [ "Augmented Reality", "Groupware", "Health Care", "Helmet Mounted Displays", "Surgery", "Telemedicine", "AR HMD", "Remote Instruction", "Healthcare", "Surgical Telementoring", "Paramedic Teleconsulting", "Technological Innovation", "Three Dimensional Displays", "Conferences", "Collaboration", "Surgery", "Medical Services", "Resists", "Human Centered Computing X 2014 Visualization X 2014" ], "authors": [ { "affiliation": "University of Maryland Baltimore County", "fullName": "Helena M. 
Mentis", "givenName": "Helena M.", "surname": "Mentis", "__typename": "ArticleAuthorType" }, { "affiliation": "Sorbonne Universite, CNRS, ISIR", "fullName": "Ignacio Avellino", "givenName": "Ignacio", "surname": "Avellino", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Maryland Baltimore County", "fullName": "Jwawon Seo", "givenName": "Jwawon", "surname": "Seo", "__typename": "ArticleAuthorType" } ], "idPrefix": "vrw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-03-01T00:00:00", "pubType": "proceedings", "pages": "437-440", "year": "2022", "issn": null, "isbn": "978-1-6654-8402-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "840200a434", "articleId": "1CJcL6rwOqY", "__typename": "AdjacentArticleType" }, "next": { "fno": "840200a441", "articleId": "1CJd8CwG0U0", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2014/6184/0/06948442", "title": "[Poster] HMD Video see though AR with unfixed cameras vergence", "doi": null, "abstractUrl": "/proceedings-article/ismar/2014/06948442/12OmNB0nWd6", "parentPublication": { "id": "proceedings/ismar/2014/6184/0", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/var4good/2018/5977/0/08576884", "title": "Augmented Visual Instruction for Surgical Practice and Training", "doi": null, "abstractUrl": "/proceedings-article/var4good/2018/08576884/17D45WODasn", "parentPublication": { "id": "proceedings/var4good/2018/5977/0", "title": "2018 IEEE Workshop on Augmented and Virtual Realities for Good (VAR4Good)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699305", "title": "A 
First-Person Mentee Second-Person Mentor AR Interface for Surgical Telementoring", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699305/19F1TZ6RppS", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699284", "title": "[Poster] Using an Industry-Ready AR HMD on a Real Maintenance Task: AR Benefits Performance on Certain Task Steps More Than Others", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699284/19F1VfaPMhW", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ainit/2021/1296/0/129600a226", "title": "Remote Cooperation System of Electric Power Communication Operation Inspection Based on AR Technology", "doi": null, "abstractUrl": "/proceedings-article/ainit/2021/129600a226/1BzWyBFkWKA", "parentPublication": { "id": "proceedings/ainit/2021/1296/0", "title": "2021 2nd International Seminar on Artificial Intelligence, Networking and Information Technology (AINIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a702", "title": "The Virtual-Augmented Reality Simulator: Evaluating OST-HMD AR calibration algorithms in VR", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a702/1CJe0D4B6b6", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/ismar-adjunct/2022/5365/0/536500a812", "title": "AR-HMD Multitask Viewing System Concept with a Supporting Handheld Viewport for Multiple Spatially-Anchored Workspaces", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a812/1J7WvwZew9O", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a460", "title": "Selection Techniques for 3D Extended Desktop Workstation with AR HMD", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a460/1JrR6BnYp6U", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/11/08794561", "title": "AR HMD Guidance for Controlled Hand-Held 3D Acquisition", "doi": null, "abstractUrl": "/journal/tg/2019/11/08794561/1dNHoWNm3GE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089635", "title": "How About the Mentor? Effective Workspace Visualization in AR Telementoring", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089635/1jIxa5RvyIo", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1Hcn4kmUyR2", "title": "2022 Global Conference on Robotics, Artificial Intelligence and Information Technology (GCRAIT)", "acronym": "gcrait", "groupId": "1847864", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1HcngrngKC4", "doi": "10.1109/GCRAIT55928.2022.00085", "title": "Research on remote guidance of hardware operation and maintenance of computer room based on AR", "normalizedTitle": "Research on remote guidance of hardware operation and maintenance of computer room based on AR", "abstract": "At present, AR remote guidance used in computer room operation and maintenance inspection usually uses circle annotation and arrow as annotation guidance. The guidance of operation mode is not clear enough, and primary operation and maintenance personnel are still difficult to understand. Therefore, a remote guidance method of computer room hardware operation and maintenance based on 3D dynamic editing AR is proposed. The 3D model of computer room hardware is used as the scene basis, the experts edit, annotate the model remotely. Finally, the dynamic operation guidance is sent to the AR display device of the field operator to form the AR dynamic operation guidance of the virtual and real fusion. The experimental results using this method show that the 3D remote annotation can effectively realize the AR remote guidance. Compared with the screenshot operation guidance, the guidance efficiency and accuracy are higher, and the field operators receive better information.", "abstracts": [ { "abstractType": "Regular", "content": "At present, AR remote guidance used in computer room operation and maintenance inspection usually uses circle annotation and arrow as annotation guidance. The guidance of operation mode is not clear enough, and primary operation and maintenance personnel are still difficult to understand. 
Therefore, a remote guidance method of computer room hardware operation and maintenance based on 3D dynamic editing AR is proposed. The 3D model of computer room hardware is used as the scene basis, the experts edit, annotate the model remotely. Finally, the dynamic operation guidance is sent to the AR display device of the field operator to form the AR dynamic operation guidance of the virtual and real fusion. The experimental results using this method show that the 3D remote annotation can effectively realize the AR remote guidance. Compared with the screenshot operation guidance, the guidance efficiency and accuracy are higher, and the field operators receive better information.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "At present, AR remote guidance used in computer room operation and maintenance inspection usually uses circle annotation and arrow as annotation guidance. The guidance of operation mode is not clear enough, and primary operation and maintenance personnel are still difficult to understand. Therefore, a remote guidance method of computer room hardware operation and maintenance based on 3D dynamic editing AR is proposed. The 3D model of computer room hardware is used as the scene basis, the experts edit, annotate the model remotely. Finally, the dynamic operation guidance is sent to the AR display device of the field operator to form the AR dynamic operation guidance of the virtual and real fusion. The experimental results using this method show that the 3D remote annotation can effectively realize the AR remote guidance. 
Compared with the screenshot operation guidance, the guidance efficiency and accuracy are higher, and the field operators receive better information.", "fno": "819200a373", "keywords": [ "Courseware", "Human Computer Interaction", "Inspection", "Maintenance Engineering", "Ontologies Artificial Intelligence", "3 D Remote Annotation", "AR Remote Guidance", "Screenshot Operation Guidance", "Guidance Efficiency", "Field Operator", "Computer Room Operation", "Maintenance Inspection", "Circle Annotation", "Arrow", "Annotation Guidance", "Operation Mode", "Primary Operation", "Maintenance Personnel", "Remote Guidance Method", "Computer Room Hardware Operation", "3 D Dynamic Editing AR", "AR Dynamic Operation Guidance", "Solid Modeling", "Three Dimensional Displays", "Annotations", "Computational Modeling", "Maintenance Engineering", "Inspection", "Hardware", "Augmented Reality", "Remote Guidance", "Visualization", "Operation And Maintenance" ], "authors": [ { "affiliation": "CSG Power Generation Co., Ltd,Guangdong,China", "fullName": "Zhu Zhu", "givenName": "Zhu", "surname": "Zhu", "__typename": "ArticleAuthorType" }, { "affiliation": "CSG Power Generation Co., Ltd,Guangdong,China", "fullName": "Jianlu Li", "givenName": "Jianlu", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": "CSG Power Generation Co., Ltd,Guangdong,China", "fullName": "Weiming He", "givenName": "Weiming", "surname": "He", "__typename": "ArticleAuthorType" }, { "affiliation": "CSG Power Generation Co., Ltd,Guangdong,China", "fullName": "Shaofeng Yu", "givenName": "Shaofeng", "surname": "Yu", "__typename": "ArticleAuthorType" }, { "affiliation": "CSG Power Generation Co., Ltd,Guangdong,China", "fullName": "Yuxi Ma", "givenName": "Yuxi", "surname": "Ma", "__typename": "ArticleAuthorType" } ], "idPrefix": "gcrait", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-07-01T00:00:00", "pubType": "proceedings", "pages": "373-376", 
"year": "2022", "issn": null, "isbn": "978-1-6654-8192-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "819200a365", "articleId": "1HcnaaPORQQ", "__typename": "AdjacentArticleType" }, "next": { "fno": "819200a377", "articleId": "1Hcn6nj0TfO", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvprw/2017/0733/0/0733b244", "title": "Object State Recognition for Automatic AR-Based Maintenance Guidance", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2017/0733b244/12OmNAGw13Z", "parentPublication": { "id": "proceedings/cvprw/2017/0733/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2014/6854/0/6854a065", "title": "VR&AR Combined Manual Operation Instruction System on Industry Products: A Case Study", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2014/6854a065/12OmNBUS73y", "parentPublication": { "id": "proceedings/icvrv/2014/6854/0", "title": "2014 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2014/6184/0/06948436", "title": "[Poster] Ongoing development of a user-centered, AR testbed in industry", "doi": null, "abstractUrl": "/proceedings-article/ismar/2014/06948436/12OmNs59JLY", "parentPublication": { "id": "proceedings/ismar/2014/6184/0", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2017/6327/0/6327a058", "title": "[POSTER] Planning-Based Workflow Modeling for AR-enabled Automated Task Guidance", "doi": null, "abstractUrl": 
"/proceedings-article/ismar-adjunct/2017/6327a058/12OmNvjgWFr", "parentPublication": { "id": "proceedings/ismar-adjunct/2017/6327/0", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvisp/2021/0770/0/077000a137", "title": "A Digital Twin Based Design of the Semi-physical Marine Engine Room Simulator for Remote Maintenance Assistance", "doi": null, "abstractUrl": "/proceedings-article/icvisp/2021/077000a137/1APq5xCkvUk", "parentPublication": { "id": "proceedings/icvisp/2021/0770/0", "title": "2021 5th International Conference on Vision, Image and Signal Processing (ICVISP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ainit/2021/1296/0/129600a226", "title": "Remote Cooperation System of Electric Power Communication Operation Inspection Based on AR Technology", "doi": null, "abstractUrl": "/proceedings-article/ainit/2021/129600a226/1BzWyBFkWKA", "parentPublication": { "id": "proceedings/ainit/2021/1296/0", "title": "2021 2nd International Seminar on Artificial Intelligence, Networking and Information Technology (AINIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a714", "title": "Does Remote Expert Representation really matters: A comparison of Video and AR-based Guidance", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a714/1CJcBSlQWNa", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/gcrait/2022/8192/0/819200a328", "title": "Design of augmented reality-based visualization system for server room operation and maintenance", "doi": null, "abstractUrl": 
"/proceedings-article/gcrait/2022/819200a328/1Hcnj1051Ju", "parentPublication": { "id": "proceedings/gcrait/2022/8192/0", "title": "2022 Global Conference on Robotics, Artificial Intelligence and Information Technology (GCRAIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a720", "title": "Co-Design of an Augmented Reality Maintenance Tool for Gas Pressure Regulation Stations", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a720/1J7WoroPd5e", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a104", "title": "Integrating AR and VR for Mobile Remote Collaboration", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a104/1gysoJbmNEI", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1J7W6LmbCw0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "9973799", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1J7WvwZew9O", "doi": "10.1109/ISMAR-Adjunct57072.2022.00175", "title": "AR-HMD Multitask Viewing System Concept with a Supporting Handheld Viewport for Multiple Spatially-Anchored Workspaces", "normalizedTitle": "AR-HMD Multitask Viewing System Concept with a Supporting Handheld Viewport for Multiple Spatially-Anchored Workspaces", "abstract": "We propose a system concept for Augmented Reality Head-Mounted Display users, which supports multitask viewing with multiple vir-tual workspaces anchored in the real-world space. Although people encounter multitasking necessities frequently, the native AR HMD and existing interfaces lack measures to visualize multiple sets of spatially-anchored information in parallel. The system separately vi-sualizes two different sets of spatially-anchored information, one on each AR HMD and smartphone, enabling side-by-side multitasking on AR HMD without applying heavy load on users. We implemented a proof-of-concept prototype that allows side-by-side viewing of the two different virtual workspaces. The proposed concept shows promises of multitasking on AR HMD, and future research will de-velop the system to be fully functional and verified with user studies.", "abstracts": [ { "abstractType": "Regular", "content": "We propose a system concept for Augmented Reality Head-Mounted Display users, which supports multitask viewing with multiple vir-tual workspaces anchored in the real-world space. Although people encounter multitasking necessities frequently, the native AR HMD and existing interfaces lack measures to visualize multiple sets of spatially-anchored information in parallel. 
The system separately vi-sualizes two different sets of spatially-anchored information, one on each AR HMD and smartphone, enabling side-by-side multitasking on AR HMD without applying heavy load on users. We implemented a proof-of-concept prototype that allows side-by-side viewing of the two different virtual workspaces. The proposed concept shows promises of multitasking on AR HMD, and future research will de-velop the system to be fully functional and verified with user studies.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose a system concept for Augmented Reality Head-Mounted Display users, which supports multitask viewing with multiple vir-tual workspaces anchored in the real-world space. Although people encounter multitasking necessities frequently, the native AR HMD and existing interfaces lack measures to visualize multiple sets of spatially-anchored information in parallel. The system separately vi-sualizes two different sets of spatially-anchored information, one on each AR HMD and smartphone, enabling side-by-side multitasking on AR HMD without applying heavy load on users. We implemented a proof-of-concept prototype that allows side-by-side viewing of the two different virtual workspaces. 
The proposed concept shows promises of multitasking on AR HMD, and future research will de-velop the system to be fully functional and verified with user studies.", "fno": "536500a812", "keywords": [ "Augmented Reality", "Data Visualisation", "Helmet Mounted Displays", "AR HMD Multitask Viewing System Concept", "Augmented Reality Head Mounted Display Users", "Handheld Viewport", "Multiple Spatially Anchored Workspaces", "Multiple Virtual Workspaces", "Multitasking Necessities", "Proof Of Concept Prototype", "Real World Space", "Side By Side Multitasking", "Side By Side Viewing", "Smartphone", "Spatially Anchored Information", "Virtual Workspaces", "Visualization", "Head Mounted Displays", "Prototypes", "Resists", "Multitasking", "Augmented Reality", "Human Centered Computing Mixed Augmented Reality", "Human Centered Computing User Interface Programming" ], "authors": [ { "affiliation": "KAIST UVR Lab", "fullName": "Seo Young Oh", "givenName": "Seo Young", "surname": "Oh", "__typename": "ArticleAuthorType" }, { "affiliation": "KAIST UVR Lab", "fullName": "Boram Yoon", "givenName": "Boram", "surname": "Yoon", "__typename": "ArticleAuthorType" }, { "affiliation": "KAIST UVR Lab", "fullName": "Woontack Woo", "givenName": "Woontack", "surname": "Woo", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-10-01T00:00:00", "pubType": "proceedings", "pages": "812-813", "year": "2022", "issn": null, "isbn": "978-1-6654-5365-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "536500a810", "articleId": "1J7WvjShlD2", "__typename": "AdjacentArticleType" }, "next": { "fno": "536500a814", "articleId": "1J7WjyIbnrO", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2013/2869/0/06671786", "title": 
"Interaction techniques for HMD-HHD hybrid AR systems", "doi": null, "abstractUrl": "/proceedings-article/ismar/2013/06671786/12OmNyxFKaD", "parentPublication": { "id": "proceedings/ismar/2013/2869/0", "title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a584", "title": "Investigating Display Position of a Head-Fixed Augmented Reality Notification for Dual-task", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a584/1CJd297BiDu", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a640", "title": "Towards Eye-Perspective Rendering for Optical See-Through Head-Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a640/1CJewzlI3CM", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a870", "title": "X-Ray Device Positioning with Augmented Reality Visual Feedback", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a870/1CJfmBHBffW", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a686", "title": "Exploring Augmented Reality Notification Placement while Communicating with Virtual Avatar", "doi": null, "abstractUrl": 
"/proceedings-article/ismar-adjunct/2022/536500a686/1J7WgWfFoOs", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a470", "title": "Perceptibility of Jitter in Augmented Reality Head-Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a470/1JrQZ2SKCuQ", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/11/08794561", "title": "AR HMD Guidance for Controlled Hand-Held 3D Acquisition", "doi": null, "abstractUrl": "/journal/tg/2019/11/08794561/1dNHoWNm3GE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2020/7675/0/767500a039", "title": "Designing a Multitasking Interface for Object-aware AR applications", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a039/1pBMfjaOy08", "parentPublication": { "id": "proceedings/ismar-adjunct/2020/7675/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a649", "title": "Comparing World and Screen Coordinate Systems in Optical See-Through Head-Mounted Displays for Text Readability while Walking", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a649/1pysvKFdazS", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed 
and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a665", "title": "Supporting Medical Auxiliary Work: The Central Sterile Services Department as a Challenging Environment for Augmented Reality Applications", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a665/1pysyCXzE8o", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1JrQPhTSspy", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1JrR6BnYp6U", "doi": "10.1109/ISMAR55827.2022.00062", "title": "Selection Techniques for 3D Extended Desktop Workstation with AR HMD", "normalizedTitle": "Selection Techniques for 3D Extended Desktop Workstation with AR HMD", "abstract": "Extending a standard desktop workstation (i.e. a screen, a mouse, a keyboard) with virtual scenes displayed on an Augmented Reality Head-Mounted Display (AR HMD) offers many identified advantages including limited physical space requirements, very large and flexible display spaces, and 3D stereoscopic views. While the technologies become more mainstream, the remaining open question is how to interact with such hybrid workstations that combine 2D views displayed on a physical monitor and 3D views displayed on a HoloLens. For a selection task, we compared mouse-based interaction (standard for 2D desktop workstations) and direct touch interaction in mid-air (standard for 3D AR) while considering different positions of the 3D scene according to a physical monitor. To extend mouse-based selection to 3D views, we experimentally explored different interaction metaphors where the mouse cursor moves either on a horizontal or a vertical plane in a 3D virtual scene. To check for ecological validity of our results, we conducted an additional study focusing on interaction with a 2D/3D Gapminder dataset visualization. 
The results show 1) that the mouse-based interaction, as compared to direct touch interaction in mid-air, is easy and efficient, 2) that using a vertical plane placed in front of the 3D virtual scene to mimic the double screen metaphor outperforms other interaction techniques and 3) that flexibility is required to allow users to choose the selection techniques and to position the 3D virtual scene relative to the physical monitor. Based on these results, we derive interaction design guidelines for hybrid workstations.", "abstracts": [ { "abstractType": "Regular", "content": "Extending a standard desktop workstation (i.e. a screen, a mouse, a keyboard) with virtual scenes displayed on an Augmented Reality Head-Mounted Display (AR HMD) offers many identified advantages including limited physical space requirements, very large and flexible display spaces, and 3D stereoscopic views. While the technologies become more mainstream, the remaining open question is how to interact with such hybrid workstations that combine 2D views displayed on a physical monitor and 3D views displayed on a HoloLens. For a selection task, we compared mouse-based interaction (standard for 2D desktop workstations) and direct touch interaction in mid-air (standard for 3D AR) while considering different positions of the 3D scene according to a physical monitor. To extend mouse-based selection to 3D views, we experimentally explored different interaction metaphors where the mouse cursor moves either on a horizontal or a vertical plane in a 3D virtual scene. To check for ecological validity of our results, we conducted an additional study focusing on interaction with a 2D/3D Gapminder dataset visualization. 
The results show 1) that the mouse-based interaction, as compared to direct touch interaction in mid-air, is easy and efficient, 2) that using a vertical plane placed in front of the 3D virtual scene to mimic the double screen metaphor outperforms other interaction techniques and 3) that flexibility is required to allow users to choose the selection techniques and to position the 3D virtual scene relative to the physical monitor. Based on these results, we derive interaction design guidelines for hybrid workstations.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Extending a standard desktop workstation (i.e. a screen, a mouse, a keyboard) with virtual scenes displayed on an Augmented Reality Head-Mounted Display (AR HMD) offers many identified advantages including limited physical space requirements, very large and flexible display spaces, and 3D stereoscopic views. While the technologies become more mainstream, the remaining open question is how to interact with such hybrid workstations that combine 2D views displayed on a physical monitor and 3D views displayed on a HoloLens. For a selection task, we compared mouse-based interaction (standard for 2D desktop workstations) and direct touch interaction in mid-air (standard for 3D AR) while considering different positions of the 3D scene according to a physical monitor. To extend mouse-based selection to 3D views, we experimentally explored different interaction metaphors where the mouse cursor moves either on a horizontal or a vertical plane in a 3D virtual scene. To check for ecological validity of our results, we conducted an additional study focusing on interaction with a 2D/3D Gapminder dataset visualization. 
The results show 1) that the mouse-based interaction, as compared to direct touch interaction in mid-air, is easy and efficient, 2) that using a vertical plane placed in front of the 3D virtual scene to mimic the double screen metaphor outperforms other interaction techniques and 3) that flexibility is required to allow users to choose the selection techniques and to position the 3D virtual scene relative to the physical monitor. Based on these results, we derive interaction design guidelines for hybrid workstations.", "fno": "532500a460", "keywords": [ "Augmented Reality", "Data Visualisation", "Graphical User Interfaces", "Handicapped Aids", "Helmet Mounted Displays", "Mouse Controllers Computers", "Virtual Reality", "2 D Desktop Workstations", "3 D Stereoscopic Views", "3 D Virtual Scene", "Augmented Reality Head Mounted Display", "Different Interaction Metaphors", "Direct Touch Interaction", "Flexible Display Spaces", "Hybrid Workstations", "Interaction Design Guidelines", "Interaction Techniques", "Mouse Cursor", "Mouse Based Interaction", "Mouse Based Selection", "Physical Monitor", "Physical Space Requirements", "Selection Task", "Selection Techniques", "Standard Desktop Workstation", "Virtual Scenes", "Visualization", "Three Dimensional Displays", "Stereo Image Processing", "Resists", "Mice", "Workstations", "Task Analysis", "Human Centered Computing", "Visualization", "Interaction Techniques", "Hybrid 2 D 3 D Workstation", "AR" ], "authors": [ { "affiliation": "Univ. Grenoble Alpes, CNRS, Grenoble INP, LIG,Grenoble,France,F-38000", "fullName": "Carole Plasson", "givenName": "Carole", "surname": "Plasson", "__typename": "ArticleAuthorType" }, { "affiliation": "Univ. Grenoble Alpes, CNRS, Grenoble INP, LIG,Grenoble,France,F-38000", "fullName": "Renaud Blanch", "givenName": "Renaud", "surname": "Blanch", "__typename": "ArticleAuthorType" }, { "affiliation": "Univ. 
Grenoble Alpes, CNRS, Grenoble INP, LIG,Grenoble,France,F-38000", "fullName": "Laurence Nigay", "givenName": "Laurence", "surname": "Nigay", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-10-01T00:00:00", "pubType": "proceedings", "pages": "460-469", "year": "2022", "issn": "1554-7868", "isbn": "978-1-6654-5325-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1JrR6vq74CQ", "name": "pismar202253250-09995594s1-mm_532500a460.zip", "size": "42 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pismar202253250-09995594s1-mm_532500a460.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "532500a450", "articleId": "1JrQVmURYMo", "__typename": "AdjacentArticleType" }, "next": { "fno": "532500a470", "articleId": "1JrQZ2SKCuQ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismarw/2016/3740/0/07836451", "title": "DualCAD: Integrating Augmented Reality with a Desktop GUI and Smartphone Interaction", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836451/12OmNAndit1", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icoin/2018/2290/0/08343267", "title": "Immersive gesture interfaces for 3D map navigation in HMD-based virtual environments", "doi": null, "abstractUrl": "/proceedings-article/icoin/2018/08343267/12OmNvD8Rwt", "parentPublication": { "id": "proceedings/icoin/2018/2290/0", "title": "2018 International Conference on Information Networking (ICOIN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/3dui/2012/1204/0/06184195", "title": "Comparison of a two-handed interface to a wand interface and a mouse interface for fundamental 3D tasks", "doi": null, "abstractUrl": "/proceedings-article/3dui/2012/06184195/12OmNwIHow8", "parentPublication": { "id": "proceedings/3dui/2012/1204/0", "title": "2012 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2012/1204/0/06184186", "title": "Design and evaluation of 3D cursors and motion parallax for the exploration of desktop virtual environments", "doi": null, "abstractUrl": "/proceedings-article/3dui/2012/06184186/12OmNx5piWT", "parentPublication": { "id": "proceedings/3dui/2012/1204/0", "title": "2012 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2014/3624/0/06798842", "title": "HybridSpace: Integrating 3D freehand input and stereo viewing into traditional desktop applications", "doi": null, "abstractUrl": "/proceedings-article/3dui/2014/06798842/12OmNyUnEGq", "parentPublication": { "id": "proceedings/3dui/2014/3624/0", "title": "2014 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2013/0015/0/06607621", "title": "Investigation and evaluation of pointing modalities for interactive stereoscopic 3D TV", "doi": null, "abstractUrl": "/proceedings-article/icme/2013/06607621/12OmNzUgdi1", "parentPublication": { "id": "proceedings/icme/2013/0015/0", "title": "2013 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ichi/2013/5089/0/5089a089", "title": "Empirical Evaluation of Traditional vs. 
Hybrid Interaction Metaphors in a Multitask Healthcare Simulation", "doi": null, "abstractUrl": "/proceedings-article/ichi/2013/5089a089/12OmNzgwmQa", "parentPublication": { "id": "proceedings/ichi/2013/5089/0", "title": "2013 IEEE International Conference on Healthcare Informatics (ICHI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/02/07833028", "title": "Augmented Reality versus Virtual Reality for 3D Object Manipulation", "doi": null, "abstractUrl": "/journal/tg/2018/02/07833028/13rRUwInvsX", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a437", "title": "AR HMD for Remote Instruction in Healthcare", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a437/1CJd7HBOPPW", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/11/08794561", "title": "AR HMD Guidance for Controlled Hand-Held 3D Acquisition", "doi": null, "abstractUrl": "/journal/tg/2019/11/08794561/1dNHoWNm3GE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1pystLSz19C", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1pysuoUYBhK", "doi": "10.1109/ISMAR50242.2020.00055", "title": "AR Interfaces for Mid-Air 6-DoF Alignment: Ergonomics-Aware Design and Evaluation", "normalizedTitle": "AR Interfaces for Mid-Air 6-DoF Alignment: Ergonomics-Aware Design and Evaluation", "abstract": "Aligning hand-held objects into mid-air positions and orientations is important for many applications. Task performance depends on speed and accuracy, and also on minimizing the user's physical exertion. Augmented reality head-mounted displays (AR HMDs) can guide users during mid-air alignments by tracking an object's pose and delivering visual instruction directly into the user's field of view (FoV). However, it is unclear which AR HMD interfaces are most effective for mid-air alignment guidance, and how the form factor of current AR HMD hardware (such as heaviness and low FoV) affects how users put themselves into tiring body poses during mid-air alignment. We defined a set of design requirements for mid-air alignment interfaces that target reduction of high-exertion body poses during alignment. We then designed, implemented, and tested several interfaces in a user study in which novice participants performed a sequence of mid-air alignments using each interface.Results show that interfaces that rely on visual guidance located near the hand-held object reduce acquisition times and translation errors, while interfaces that involve aiming at a faraway virtual object reduce rotation errors. Users tend to avoid focus shifts and to position the head and arms to maximize how much AR visualization is contained within a single FoV without moving the head. 
We found that changing the size of visual elements affected how far out the user extends the arm, which affects torque forces. We also found that dynamically adjusting where visual guidance is placed relative to the mid-air pose can help keep the head level during alignment, which is important for distributing the weight of the AR HMD.", "abstracts": [ { "abstractType": "Regular", "content": "Aligning hand-held objects into mid-air positions and orientations is important for many applications. Task performance depends on speed and accuracy, and also on minimizing the user's physical exertion. Augmented reality head-mounted displays (AR HMDs) can guide users during mid-air alignments by tracking an object's pose and delivering visual instruction directly into the user's field of view (FoV). However, it is unclear which AR HMD interfaces are most effective for mid-air alignment guidance, and how the form factor of current AR HMD hardware (such as heaviness and low FoV) affects how users put themselves into tiring body poses during mid-air alignment. We defined a set of design requirements for mid-air alignment interfaces that target reduction of high-exertion body poses during alignment. We then designed, implemented, and tested several interfaces in a user study in which novice participants performed a sequence of mid-air alignments using each interface.Results show that interfaces that rely on visual guidance located near the hand-held object reduce acquisition times and translation errors, while interfaces that involve aiming at a faraway virtual object reduce rotation errors. Users tend to avoid focus shifts and to position the head and arms to maximize how much AR visualization is contained within a single FoV without moving the head. We found that changing the size of visual elements affected how far out the user extends the arm, which affects torque forces. 
We also found that dynamically adjusting where visual guidance is placed relative to the mid-air pose can help keep the head level during alignment, which is important for distributing the weight of the AR HMD.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Aligning hand-held objects into mid-air positions and orientations is important for many applications. Task performance depends on speed and accuracy, and also on minimizing the user's physical exertion. Augmented reality head-mounted displays (AR HMDs) can guide users during mid-air alignments by tracking an object's pose and delivering visual instruction directly into the user's field of view (FoV). However, it is unclear which AR HMD interfaces are most effective for mid-air alignment guidance, and how the form factor of current AR HMD hardware (such as heaviness and low FoV) affects how users put themselves into tiring body poses during mid-air alignment. We defined a set of design requirements for mid-air alignment interfaces that target reduction of high-exertion body poses during alignment. We then designed, implemented, and tested several interfaces in a user study in which novice participants performed a sequence of mid-air alignments using each interface.Results show that interfaces that rely on visual guidance located near the hand-held object reduce acquisition times and translation errors, while interfaces that involve aiming at a faraway virtual object reduce rotation errors. Users tend to avoid focus shifts and to position the head and arms to maximize how much AR visualization is contained within a single FoV without moving the head. We found that changing the size of visual elements affected how far out the user extends the arm, which affects torque forces. 
We also found that dynamically adjusting where visual guidance is placed relative to the mid-air pose can help keep the head level during alignment, which is important for distributing the weight of the AR HMD.", "fno": "850800a289", "keywords": [ "Augmented Reality", "Data Visualisation", "Helmet Mounted Displays", "User Interfaces", "Mid Air 6 Do F Alignment", "Mid Air Positions", "Augmented Reality Head Mounted Displays", "AR HMD Interfaces", "Mid Air Alignment Guidance", "Visual Guidance", "Hand Held Object", "Ergonomics Aware Design", "Ergonomics Aware Evaluation", "Field Of View", "Fo V", "AR Visualization", "Visualization", "Torque", "Resists", "Hardware", "Space Exploration", "Task Analysis", "Augmented Reality", "Human Centered Computing X 2014 User Studies", "Computing Methodologies X 2014 Mixed Augmented Reality" ], "authors": [ { "affiliation": "Purdue University", "fullName": "Daniel Andersen", "givenName": "Daniel", "surname": "Andersen", "__typename": "ArticleAuthorType" }, { "affiliation": "Purdue University", "fullName": "Voicu Popescu", "givenName": "Voicu", "surname": "Popescu", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-11-01T00:00:00", "pubType": "proceedings", "pages": "289-300", "year": "2020", "issn": "1554-7868", "isbn": "978-1-7281-8508-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "850800a279", "articleId": "1pysvRpTvr2", "__typename": "AdjacentArticleType" }, "next": { "fno": "850800a301", "articleId": "1pysxIK95Yc", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2017/6647/0/07892261", "title": "The AR-Rift 2 prototype", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892261/12OmNCcKQmq", "parentPublication": { "id": 
"proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgi/2004/2171/0/21710411", "title": "Composing 6 DOF Tracking Systems for VR/AR", "doi": null, "abstractUrl": "/proceedings-article/cgi/2004/21710411/12OmNyvoXbi", "parentPublication": { "id": "proceedings/cgi/2004/2171/0", "title": "Proceedings. Computer Graphics International", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2014/6184/0/06948438", "title": "[Poster] Visual-lnertial 6-DOF localization for a wearable immersive VR/AR system", "doi": null, "abstractUrl": "/proceedings-article/ismar/2014/06948438/12OmNzd7bC0", "parentPublication": { "id": "proceedings/ismar/2014/6184/0", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a722", "title": "AIR-range: Arranging optical systems to present mid-AIR images with continuous luminance on and above a tabletop", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a722/1CJd3cfYsbm", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a876", "title": "HoloCMDS: Investigating Around Field of View Glanceable Commands Selection in AR-HMDs", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a876/1CJdZ8RwdnG", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/ismar-adjunct/2022/5365/0/536500a686", "title": "Exploring Augmented Reality Notification Placement while Communicating with Virtual Avatar", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a686/1J7WgWfFoOs", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a637", "title": "Blending On-Body and Mid-Air Interaction in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a637/1JrRmvhGko0", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a026", "title": "The Kuroko Paradigm: The Implications of Augmenting Physical Interaction with AR Avatars", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a026/1gysn4uy67C", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a332", "title": "ARPads: Mid-air Indirect Input for Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a332/1pysxWDVgS4", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a492", "title": "Determining the Target Point of the Mid-Air Pinch Gesture", "doi": 
null, "abstractUrl": "/proceedings-article/vrw/2021/405700a492/1tnXsQx2NOw", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwvVrMU", "title": "2013 Fifth International Conference on Service Science and Innovation (ICSSI)", "acronym": "icssi", "groupId": "1802839", "volume": "0", "displayVolume": "0", "year": "2013", "__typename": "ProceedingType" }, "article": { "id": "12OmNAo45Fu", "doi": "10.1109/ICSSI.2013.31", "title": "Evaluating Interactions between Appearance-Related Product Designs and Facial Characteristics", "normalizedTitle": "Evaluating Interactions between Appearance-Related Product Designs and Facial Characteristics", "abstract": "Design prototype evaluation plays a crucial role in user experience assessment. This paper proposes an evaluation scheme to investigate interactions between design features of a product and user facial characteristics. Glasses frames design is used as an example to illustrate how the scheme works. 3D scanning technology is applied to capture the facial features of users and reconstruct their 3D face models. Those models allow us to post-process individual facial feature without changing the others. Subjects respond to three affective measures: confidence, friendliness, and attractiveness, signified by the faces wearing the factorized glasses frames. The results show that changing certain design features indeed influences the impressions of the faces with varied facial characteristics. The proposed scheme facilitates design of products related to personal appearance.", "abstracts": [ { "abstractType": "Regular", "content": "Design prototype evaluation plays a crucial role in user experience assessment. This paper proposes an evaluation scheme to investigate interactions between design features of a product and user facial characteristics. Glasses frames design is used as an example to illustrate how the scheme works. 3D scanning technology is applied to capture the facial features of users and reconstruct their 3D face models. Those models allow us to post-process individual facial feature without changing the others. 
Subjects respond to three affective measures: confidence, friendliness, and attractiveness, signified by the faces wearing the factorized glasses frames. The results show that changing certain design features indeed influences the impressions of the faces with varied facial characteristics. The proposed scheme facilitates design of products related to personal appearance.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Design prototype evaluation plays a crucial role in user experience assessment. This paper proposes an evaluation scheme to investigate interactions between design features of a product and user facial characteristics. Glasses frames design is used as an example to illustrate how the scheme works. 3D scanning technology is applied to capture the facial features of users and reconstruct their 3D face models. Those models allow us to post-process individual facial feature without changing the others. Subjects respond to three affective measures: confidence, friendliness, and attractiveness, signified by the faces wearing the factorized glasses frames. The results show that changing certain design features indeed influences the impressions of the faces with varied facial characteristics. 
The proposed scheme facilitates design of products related to personal appearance.", "fno": "4985a115", "keywords": [ "Face", "Glass", "Three Dimensional Displays", "Solid Modeling", "Prototypes", "Facial Features", "Shape", "Affective Design", "Design Evaluation", "Emotional Engineering", "Kansei" ], "authors": [ { "affiliation": null, "fullName": "Chih-Hsing Chu", "givenName": "Chih-Hsing", "surname": "Chu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Szu-Hao Huang", "givenName": "Szu-Hao", "surname": "Huang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "I-Jan Wang", "givenName": "I-Jan", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Cheng-Hung Lo", "givenName": "Cheng-Hung", "surname": "Lo", "__typename": "ArticleAuthorType" } ], "idPrefix": "icssi", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2013-05-01T00:00:00", "pubType": "proceedings", "pages": "115-118", "year": "2013", "issn": null, "isbn": "978-0-7695-4985-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4985a108", "articleId": "12OmNAkWvnJ", "__typename": "AdjacentArticleType" }, "next": { "fno": "4985a119", "articleId": "12OmNz5JBTW", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/est/2012/4791/0/4791a027", "title": "Understanding Familiar Face Recognition for 3D Scanned Images: The Importance of Internal and External Facial Features", "doi": null, "abstractUrl": "/proceedings-article/est/2012/4791a027/12OmNBRbkpQ", "parentPublication": { "id": "proceedings/est/2012/4791/0", "title": "2012 Third International Conference on Emerging Security Technologies", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2009/3994/0/05204302", 
"title": "Measuring changes in face appearance through aging", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2009/05204302/12OmNBigFnc", "parentPublication": { "id": "proceedings/cvprw/2009/3994/0", "title": "2009 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04760995", "title": "Combining motion and appearance for gender classification from video sequences", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04760995/12OmNCcbEfL", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aipr/2013/2540/0/06749338", "title": "Face recognition using Elastic bunch graph matching", "doi": null, "abstractUrl": "/proceedings-article/aipr/2013/06749338/12OmNxRF73y", "parentPublication": { "id": "proceedings/aipr/2013/2540/0", "title": "2013 IEEE Applied Imagery Pattern Recognition Workshop: Sensing for Control and Augmentation (AIPR 2013)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2016/0641/0/07477556", "title": "Effect of illicit drug abuse on face recognition", "doi": null, "abstractUrl": "/proceedings-article/wacv/2016/07477556/12OmNyL0TJc", "parentPublication": { "id": "proceedings/wacv/2016/0641/0", "title": "2016 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icip/1997/8183/3/81833308", "title": "Phantom faces for face analysis", "doi": null, "abstractUrl": "/proceedings-article/icip/1997/81833308/12OmNyuPKYA", "parentPublication": { "id": "proceedings/icip/1997/8183/3", "title": "Proceedings of International Conference on Image 
Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2012/0233/0/06163008", "title": "A sparse representation approach to face matching across plastic surgery", "doi": null, "abstractUrl": "/proceedings-article/wacv/2012/06163008/12OmNz5JCh9", "parentPublication": { "id": "proceedings/wacv/2012/0233/0", "title": "Applications of Computer Vision, IEEE Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/delta/2004/2081/0/01409812", "title": "A new method for eye extraction from facial image", "doi": null, "abstractUrl": "/proceedings-article/delta/2004/01409812/12OmNzvhvF8", "parentPublication": { "id": "proceedings/delta/2004/2081/0", "title": "Proceedings. DELTA 2004. Second IEEE International Workshop on Electronic Design, Test and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2018/3788/0/08545352", "title": "Grouped Multi-Task CNN for Facial Attribute Recognition", "doi": null, "abstractUrl": "/proceedings-article/icpr/2018/08545352/17D45XuDNH4", "parentPublication": { "id": "proceedings/icpr/2018/3788/0", "title": "2018 24th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cse-euc/2019/1664/0/166400a225", "title": "Intelligent In-Vehicle Safety and Security Monitoring System with Face Recognition", "doi": null, "abstractUrl": "/proceedings-article/cse-euc/2019/166400a225/1fHkv6CdPdm", "parentPublication": { "id": "proceedings/cse-euc/2019/1664/0", "title": "2019 IEEE International Conference on Computational Science and Engineering (CSE) and IEEE International Conference on Embedded and Ubiquitous Computing (EUC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwDACj6", "title": "2016 International Conference on Collaboration Technologies and Systems (CTS)", "acronym": "cts", "groupId": "1001747", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNyuy9RX", "doi": "10.1109/CTS.2016.0084", "title": "Tele-Board Prototyper - Distributed 3D Modeling in a Web-Based Real-Time Collaboration System", "normalizedTitle": "Tele-Board Prototyper - Distributed 3D Modeling in a Web-Based Real-Time Collaboration System", "abstract": "Prototypes help people to externalize their ideas and are a basic element for gathering feedback on an early product design. Prototyping is oftentimes a team-based method traditionally involving physical and analog tools. At the same time, collaboration among geographically dispersed team members becomes more and more standard practice for companies and research teams. Therefore, a growing need arises for collaborative prototyping environments. We present a standards compliant, web browser-based real-time remote 3D modeling system. We utilize cross-platform WebGL rendering API for hardware accelerated visualization of 3D models. Synchronization relies on WebSocket-based message interchange over a centralized Node.js real-time collaboration server. In a first co-located user test, participants were able to rebuild physical prototypes without having prior knowledge of the system. This way, the provided system design and its implementation can serve as a basis for visual real-time collaboration systems available across a multitude of hardware devices.", "abstracts": [ { "abstractType": "Regular", "content": "Prototypes help people to externalize their ideas and are a basic element for gathering feedback on an early product design. Prototyping is oftentimes a team-based method traditionally involving physical and analog tools. 
At the same time, collaboration among geographically dispersed team members becomes more and more standard practice for companies and research teams. Therefore, a growing need arises for collaborative prototyping environments. We present a standards compliant, web browser-based real-time remote 3D modeling system. We utilize cross-platform WebGL rendering API for hardware accelerated visualization of 3D models. Synchronization relies on WebSocket-based message interchange over a centralized Node.js real-time collaboration server. In a first co-located user test, participants were able to rebuild physical prototypes without having prior knowledge of the system. This way, the provided system design and its implementation can serve as a basis for visual real-time collaboration systems available across a multitude of hardware devices.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Prototypes help people to externalize their ideas and are a basic element for gathering feedback on an early product design. Prototyping is oftentimes a team-based method traditionally involving physical and analog tools. At the same time, collaboration among geographically dispersed team members becomes more and more standard practice for companies and research teams. Therefore, a growing need arises for collaborative prototyping environments. We present a standards compliant, web browser-based real-time remote 3D modeling system. We utilize cross-platform WebGL rendering API for hardware accelerated visualization of 3D models. Synchronization relies on WebSocket-based message interchange over a centralized Node.js real-time collaboration server. In a first co-located user test, participants were able to rebuild physical prototypes without having prior knowledge of the system. 
This way, the provided system design and its implementation can serve as a basis for visual real-time collaboration systems available across a multitude of hardware devices.", "fno": "07871022", "keywords": [ "Application Program Interfaces", "Data Visualisation", "Groupware", "Online Front Ends", "Real Time Systems", "Rendering Computer Graphics", "Synchronisation", "Virtual Prototyping", "Tele Board Prototyper", "Distributed 3 D Modeling", "Web Based Real Time Collaboration System", "Web Browser", "Real Time Remote 3 D Modeling System", "Cross Platform Web GL Rendering API", "Hardware Accelerated Visualization", "Synchronization", "Web Socket Based Message Interchange", "Centralized Node Js Real Time Collaboration Server", "Three Dimensional Displays", "Solid Modeling", "Collaboration", "Real Time Systems", "Browsers", "Prototypes", "Synchronization", "Remote Collaboration", "Prototyping", "3 0", "Real Time", "Java Script", "Web Gl" ], "authors": [ { "affiliation": null, "fullName": "Matthias Wenzel", "givenName": "Matthias", "surname": "Wenzel", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Adrian Klinger", "givenName": "Adrian", "surname": "Klinger", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Christoph Meinel", "givenName": "Christoph", "surname": "Meinel", "__typename": "ArticleAuthorType" } ], "idPrefix": "cts", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2016-10-01T00:00:00", "pubType": "proceedings", "pages": "446-453", "year": "2016", "issn": null, "isbn": "978-1-5090-2300-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07871021", "articleId": "12OmNxbW4Rn", "__typename": "AdjacentArticleType" }, "next": { "fno": "07871023", "articleId": "12OmNvoWUXN", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": 
"proceedings/fie/2014/3922/0/07044423", "title": "Prototyping: A key skill for innovation and life-long learning", "doi": null, "abstractUrl": "/proceedings-article/fie/2014/07044423/12OmNAZfxG8", "parentPublication": { "id": "proceedings/fie/2014/3922/0", "title": "2014 IEEE Frontiers in Education Conference (FIE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cic/2017/2565/0/256501a011", "title": "A Comparative Study of E-Collaboration on Temporary vs. Ongoing Teams", "doi": null, "abstractUrl": "/proceedings-article/cic/2017/256501a011/12OmNBlFQW7", "parentPublication": { "id": "proceedings/cic/2017/2565/0", "title": "2017 IEEE 3rd International Conference on Collaboration and Internet Computing (CIC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/gas/2015/7046/0/7046a022", "title": "Space Connection: A New 3D Tele-immersion Platform for Web-Based Gesture-Collaborative Games and Services", "doi": null, "abstractUrl": "/proceedings-article/gas/2015/7046a022/12OmNwtn3sw", "parentPublication": { "id": "proceedings/gas/2015/7046/0", "title": "2015 IEEE/ACM 4th International Workshop on Games and Software Engineering (GAS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/collaboratecom/2014/043/0/07014563", "title": "SAGE2: A new approach for data intensive collaboration using Scalable Resolution Shared Displays", "doi": null, "abstractUrl": "/proceedings-article/collaboratecom/2014/07014563/12OmNxxNbT7", "parentPublication": { "id": "proceedings/collaboratecom/2014/043/0", "title": "2014 International Conference on Collaborative Computing: Networking, Applications and Worksharing (CollaborateCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fie/2015/8454/0/07344060", "title": "Understanding the prototyping strategies of experienced designers", "doi": 
null, "abstractUrl": "/proceedings-article/fie/2015/07344060/12OmNy4r3Tj", "parentPublication": { "id": "proceedings/fie/2015/8454/0", "title": "2015 IEEE Frontiers in Education Conference (FIE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/svr/2016/4149/0/4149a071", "title": "A User Perspective Analysis on Augmented vs 3D Printed Prototypes for Product's Project Design", "doi": null, "abstractUrl": "/proceedings-article/svr/2016/4149a071/12OmNzcPAeW", "parentPublication": { "id": "proceedings/svr/2016/4149/0", "title": "2016 XVIII Symposium on Virtual and Augmented Reality (SVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/collaboratecom/2012/2740/0/06450919", "title": "SpreadComp platform: A new paradigm for distributed spreadsheet collaboration and composition", "doi": null, "abstractUrl": "/proceedings-article/collaboratecom/2012/06450919/12OmNzdoMjY", "parentPublication": { "id": "proceedings/collaboratecom/2012/2740/0", "title": "2012 8th International Conference on Collaborative Computing: Networking, Applications and Worksharing (CollaborateCom 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/pc/2005/04/b4027", "title": "Multipurpose Prototypes for Assessing User Interfaces in Pervasive Computing Systems", "doi": null, "abstractUrl": "/magazine/pc/2005/04/b4027/13rRUwInvio", "parentPublication": { "id": "mags/pc", "title": "IEEE Pervasive Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cste/2022/8188/0/818800a006", "title": "Requirements Analysis and a Design Model for Educational VR Prototyping", "doi": null, "abstractUrl": "/proceedings-article/cste/2022/818800a006/1J7W3e34rLy", "parentPublication": { "id": "proceedings/cste/2022/8188/0", "title": "2022 4th International Conference on Computer Science and Technologies in Education 
(CSTE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2020/7675/0/767500a001", "title": "A Conceptual Model for Data Collection and Analysis for AR-based Remote Collaboration Evaluation", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a001/1pBMfVnQJAA", "parentPublication": { "id": "proceedings/ismar-adjunct/2020/7675/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKiqJ", "title": "2018 3rd International Conference on Mechanical, Control and Computer Engineering (ICMCCE)", "acronym": "icmcce", "groupId": "1824464", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "17D45WwsQ4Z", "doi": "10.1109/ICMCCE.2018.00044", "title": "Teapot Three-Dimensional Geometrical Model Reconstruction Based on Reverse Engineering and Rapid Prototyping Technology", "normalizedTitle": "Teapot Three-Dimensional Geometrical Model Reconstruction Based on Reverse Engineering and Rapid Prototyping Technology", "abstract": "Based on the 3D modeling method reconstruction of reverse engineering and rapid prototyping technology, the surface of the kettle body and the lid of the teapot was digitized, realizing the process of entity → 3D data → 3D model reconstruction → entity model. In this paper, the teapot surface was digitally measured with the non-contact digital technology, handheld 3D laser scanner to obtain a lot of point-cloud data. The reconstruction of the model was accomplished using computer-aided software such as Imageware and UG, and the output of entity modeling was achieved using a rapid prototyping machine. The combination of reverse engineering and rapid prototyping technology simplifies the process of product reoptimization and effectively shortens the design and the development cycle of the product.", "abstracts": [ { "abstractType": "Regular", "content": "Based on the 3D modeling method reconstruction of reverse engineering and rapid prototyping technology, the surface of the kettle body and the lid of the teapot was digitized, realizing the process of entity → 3D data → 3D model reconstruction → entity model. In this paper, the teapot surface was digitally measured with the non-contact digital technology, handheld 3D laser scanner to obtain a lot of point-cloud data. 
The reconstruction of the model was accomplished using computer-aided software such as Imageware and UG, and the output of entity modeling was achieved using a rapid prototyping machine. The combination of reverse engineering and rapid prototyping technology simplifies the process of product reoptimization and effectively shortens the design and the development cycle of the product.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Based on the 3D modeling method reconstruction of reverse engineering and rapid prototyping technology, the surface of the kettle body and the lid of the teapot was digitized, realizing the process of entity → 3D data → 3D model reconstruction → entity model. In this paper, the teapot surface was digitally measured with the non-contact digital technology, handheld 3D laser scanner to obtain a lot of point-cloud data. The reconstruction of the model was accomplished using computer-aided software such as Imageware and UG, and the output of entity modeling was achieved using a rapid prototyping machine. 
The combination of reverse engineering and rapid prototyping technology simplifies the process of product reoptimization and effectively shortens the design and the development cycle of the product.", "fno": "848100a180", "keywords": [ "CAD", "Optical Scanners", "Product Development", "Production Engineering Computing", "Rapid Prototyping Industrial", "Reverse Engineering", "Solid Modelling", "Rapid Prototyping Machine", "Reverse Engineering", "Rapid Prototyping Technology", "3 D Modeling Method Reconstruction", "Teapot Surface", "Noncontact Digital Technology", "Handheld 3 D Laser Scanner", "Entity Modeling", "Point Cloud Data", "Computer Aided Software", "Product Reoptimization", "Product Development Cycle", "Teapot Three Dimensional Geometrical Model Reconstruction", "Three Dimensional Displays", "Handheld Computers", "Reverse Engineering", "Rapid Prototyping", "Solid Modeling", "Surface Reconstruction", "Teapot", "Reverse Engineering", "Rapid Prototyping", "3 D Model Reconstruction" ], "authors": [ { "affiliation": null, "fullName": "Shuaishuai Lv", "givenName": "Shuaishuai", "surname": "Lv", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yangyang Zhu", "givenName": "Yangyang", "surname": "Zhu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Hongjun Ni", "givenName": "Hongjun", "surname": "Ni", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Xingxing Wang", "givenName": "Xingxing", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Tiancheng Huang", "givenName": "Tiancheng", "surname": "Huang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jie Zhang", "givenName": "Jie", "surname": "Zhang", "__typename": "ArticleAuthorType" } ], "idPrefix": "icmcce", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-09-01T00:00:00", "pubType": "proceedings", "pages": "180-184", 
"year": "2018", "issn": null, "isbn": "978-1-5386-8481-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "848100a175", "articleId": "17D45WnnFXG", "__typename": "AdjacentArticleType" }, "next": { "fno": "848100a185", "articleId": "17D45WHONnx", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/rsp/2008/3180/0/3180a017", "title": "Functional DIF for Rapid Prototyping", "doi": null, "abstractUrl": "/proceedings-article/rsp/2008/3180a017/12OmNCcKQu6", "parentPublication": { "id": "proceedings/rsp/2008/3180/0", "title": "2008 19th IEEE/IFIP International Symposium on Rapid System Prototyping", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rsp/1999/0246/0/02460052", "title": "Language-Based Rapid Prototyping Methods for Legacy System Re-Engineering and Re-Use", "doi": null, "abstractUrl": "/proceedings-article/rsp/1999/02460052/12OmNrAMETi", "parentPublication": { "id": "proceedings/rsp/1999/0246/0", "title": "Rapid System Prototyping, IEEE International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rsp/1999/0246/0/02460128", "title": "FPGA Partitioning for Rapid Prototyping : A 1 Million Gate Design Case Study", "doi": null, "abstractUrl": "/proceedings-article/rsp/1999/02460128/12OmNzFMFsm", "parentPublication": { "id": "proceedings/rsp/1999/0246/0", "title": "Rapid System Prototyping, IEEE International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/1995/06/mcg1995060020", "title": "Tele-Manufacturing: Rapid Prototyping on the Internet", "doi": null, "abstractUrl": "/magazine/cg/1995/06/mcg1995060020/13rRUwh80JK", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/ds/2007/05/o5001", "title": "Guest Editor's Introduction: Rapid System Prototyping", "doi": null, "abstractUrl": "/magazine/ds/2007/05/o5001/13rRUxAATch", "parentPublication": { "id": "mags/ds", "title": "IEEE Distributed Systems Online", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/pc/2005/04/b4058", "title": "Rapid Prototyping and User-Centered Design of Interactive Display-Based Systems", "doi": null, "abstractUrl": "/magazine/pc/2005/04/b4058/13rRUxASuey", "parentPublication": { "id": "mags/pc", "title": "IEEE Pervasive Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/ds/2007/04/o4007", "title": "Guest Editor's Introduction: Rapid System Prototyping", "doi": null, "abstractUrl": "/magazine/ds/2007/04/o4007/13rRUxBJhqs", "parentPublication": { "id": "mags/ds", "title": "IEEE Distributed Systems Online", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/ds/2007/03/o3007", "title": "Guest Editor's Introduction: Rapid System Prototyping", "doi": null, "abstractUrl": "/magazine/ds/2007/03/o3007/13rRUy3gmYQ", "parentPublication": { "id": "mags/ds", "title": "IEEE Distributed Systems Online", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmcce/2018/8481/0/848100a185", "title": "Three-Dimension Model and Rapid Prototyping of Toy Pen Holder Based on Reverse Engineering", "doi": null, "abstractUrl": "/proceedings-article/icmcce/2018/848100a185/17D45WHONnx", "parentPublication": { "id": "proceedings/icmcce/2018/8481/0", "title": "2018 3rd International Conference on Mechanical, Control and Computer Engineering (ICMCCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmcce/2018/8481/0/848100a175", "title": "Reverse Design and Rapid Prototyping 
of Single or Small Batch Teacup", "doi": null, "abstractUrl": "/proceedings-article/icmcce/2018/848100a175/17D45WnnFXG", "parentPublication": { "id": "proceedings/icmcce/2018/8481/0", "title": "2018 3rd International Conference on Mechanical, Control and Computer Engineering (ICMCCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1G4EUUmGcrS", "title": "2022 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)", "acronym": "icmew", "groupId": "1801805", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1G4EZMpAbSg", "doi": "10.1109/ICMEW56448.2022.9859434", "title": "3D-DSPnet: Product Disassembly Sequence Planning", "normalizedTitle": "3D-DSPnet: Product Disassembly Sequence Planning", "abstract": "Product Disassembly has become an area of active research as it supports sustainable development by aiding effective end-of-life (EOL) stage strategies like reuse, re-manufacturing, recycling, etc. In this work, we propose a new approach, 3D-DSPNet, that can utilize 3D data from CAD assembly models to generate a feasible disassembly sequence. Our approach uses Graph-based learning to process the graph representation of CAD models. Currently, the available 3D CAD model datasets lack ground truth disassembly sequences. We propose and curate a new dataset, the 3D-DSP dataset, which includes ground truth information about the disassembly sequence for 3D product models. We carry out evaluation and analysis of results to explain the efficacy of the proposed method. Our approach significantly outperforms the existing baseline. We develop an Autodesk Fusion 360 plug-in that generates disassembly sequence animation, allowing intuitive analysis of the disassembly plan.", "abstracts": [ { "abstractType": "Regular", "content": "Product Disassembly has become an area of active research as it supports sustainable development by aiding effective end-of-life (EOL) stage strategies like reuse, re-manufacturing, recycling, etc. In this work, we propose a new approach, 3D-DSPNet, that can utilize 3D data from CAD assembly models to generate a feasible disassembly sequence. Our approach uses Graph-based learning to process the graph representation of CAD models. 
Currently, the available 3D CAD model datasets lack ground truth disassembly sequences. We propose and curate a new dataset, the 3D-DSP dataset, which includes ground truth information about the disassembly sequence for 3D product models. We carry out evaluation and analysis of results to explain the efficacy of the proposed method. Our approach significantly outperforms the existing baseline. We develop an Autodesk Fusion 360 plug-in that generates disassembly sequence animation, allowing intuitive analysis of the disassembly plan.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Product Disassembly has become an area of active research as it supports sustainable development by aiding effective end-of-life (EOL) stage strategies like reuse, re-manufacturing, recycling, etc. In this work, we propose a new approach, 3D-DSPNet, that can utilize 3D data from CAD assembly models to generate a feasible disassembly sequence. Our approach uses Graph-based learning to process the graph representation of CAD models. Currently, the available 3D CAD model datasets lack ground truth disassembly sequences. We propose and curate a new dataset, the 3D-DSP dataset, which includes ground truth information about the disassembly sequence for 3D product models. We carry out evaluation and analysis of results to explain the efficacy of the proposed method. Our approach significantly outperforms the existing baseline. 
We develop an Autodesk Fusion 360 plug-in that generates disassembly sequence animation, allowing intuitive analysis of the disassembly plan.", "fno": "09859434", "keywords": [ "Assembling", "CAD", "Computer Animation", "Design For Disassembly", "Graph Theory", "Learning Artificial Intelligence", "Production Engineering Computing", "Recycling", "Sustainable Development", "Product Disassembly Sequence Planning", "Sustainable Development", "End Of Life Stage Strategies", "CAD Assembly Models", "Feasible Disassembly Sequence", "Graph Representation", "CAD Models", "Available 3 D CAD Model Datasets", "Truth Disassembly Sequences", "3 D DSP Dataset", "Ground Truth Information", "3 D Product Models", "Disassembly Sequence Animation", "Disassembly Plan", "Solid Modeling", "Three Dimensional Displays", "Biological System Modeling", "Multimedia Systems", "Conferences", "Animation", "Data Models", "Disassembly Planning", "EOL", "Circular Economy", "CAD" ], "authors": [ { "affiliation": "Accenture Labs,Bangalore", "fullName": "Abhinav Upadhyay", "givenName": "Abhinav", "surname": "Upadhyay", "__typename": "ArticleAuthorType" }, { "affiliation": "Indian Institute of Technology,Ropar", "fullName": "Bharat Ladrecha", "givenName": "Bharat", "surname": "Ladrecha", "__typename": "ArticleAuthorType" }, { "affiliation": "Accenture Labs,Bangalore", "fullName": "Alpana Dubey", "givenName": "Alpana", "surname": "Dubey", "__typename": "ArticleAuthorType" }, { "affiliation": "Accenture Labs,Bangalore", "fullName": "Suma Mani Kuriakose", "givenName": "Suma Mani", "surname": "Kuriakose", "__typename": "ArticleAuthorType" }, { "affiliation": "Accenture Labs,Bangalore", "fullName": "Piyush Goenka", "givenName": "Piyush", "surname": "Goenka", "__typename": "ArticleAuthorType" } ], "idPrefix": "icmew", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-07-01T00:00:00", "pubType": "proceedings", "pages": "1-6", "year": "2022", "issn": 
null, "isbn": "978-1-6654-7218-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09859499", "articleId": "1G4F2d3L4Va", "__typename": "AdjacentArticleType" }, "next": { "fno": "09859414", "articleId": "1G4EZE2pQYg", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/isee/2005/8910/0/01437038", "title": "Intelligent disassembly sequence planning for EOL recycling based on hierarchical fuzzy cognitive map", "doi": null, "abstractUrl": "/proceedings-article/isee/2005/01437038/12OmNAMtAOV", "parentPublication": { "id": "proceedings/isee/2005/8910/0", "title": "Proceedings of the 2005 IEEE International Symposium on Electronics and the Environment", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isatp/2003/7770/0/01217203", "title": "Optimum disassembly sequence with sequence-dependent disassembly costs", "doi": null, "abstractUrl": "/proceedings-article/isatp/2003/01217203/12OmNB0nWcI", "parentPublication": { "id": "proceedings/isatp/2003/7770/0", "title": "ISATP'03: 5th IEEE International Symposium on Assembly and Task Planning", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2009/3943/0/04810993", "title": "Haptic Assembly and Disassembly Task Assistance using Interactive Path Planning", "doi": null, "abstractUrl": "/proceedings-article/vr/2009/04810993/12OmNBSSVjp", "parentPublication": { "id": "proceedings/vr/2009/3943/0", "title": "2009 IEEE Virtual Reality Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isatp/1995/6995/0/69950232", "title": "Features modeling in assembly sequence and resource planning", "doi": null, "abstractUrl": "/proceedings-article/isatp/1995/69950232/12OmNqBKTYG", "parentPublication": { "id": 
"proceedings/isatp/1995/6995/0", "title": "Assembly and Task Planning, IEEE International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icebe/2013/5111/0/5111a393", "title": "A Simplified Teaching-Learning-Based Optimization Algorithm for Disassembly Sequence Planning", "doi": null, "abstractUrl": "/proceedings-article/icebe/2013/5111a393/12OmNrJROVg", "parentPublication": { "id": "proceedings/icebe/2013/5111/0", "title": "2013 IEEE 10th International Conference on e-Business Engineering (ICEBE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iscid/2010/4198/1/4198a044", "title": "Disassembly Sequence Decision Making for Products Recycling and Remanufacturing Systems", "doi": null, "abstractUrl": "/proceedings-article/iscid/2010/4198a044/12OmNvjyxFw", "parentPublication": { "id": "proceedings/iscid/2010/4198/1", "title": "Computational Intelligence and Design, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isee/2000/5962/0/00857644", "title": "A Web-oriented, virtual product disassembly and identification method for DFE and electronic demanufacturers", "doi": null, "abstractUrl": "/proceedings-article/isee/2000/00857644/12OmNwGqBnB", "parentPublication": { "id": "proceedings/isee/2000/5962/0", "title": "Proceedings of the 2000 IEEE International Symposium on Electronics and the Environment. 
ISEE - 2000", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2010/3962/2/3962c627", "title": "Mechanical Product Disassembly and/or Graph Construction", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2010/3962c627/12OmNy1SFM4", "parentPublication": { "id": "proceedings/icmtma/2010/3962/2", "title": "2010 International Conference on Measuring Technology and Mechatronics Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/peits/2008/3342/0/3342a587", "title": "Research on Virtual Disassembly Simulation Based on Constraint Matrix", "doi": null, "abstractUrl": "/proceedings-article/peits/2008/3342a587/12OmNzBOidf", "parentPublication": { "id": "proceedings/peits/2008/3342/0", "title": "2008 Workshop on Power Electronics and Intelligent Transportation System", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icceic/2020/8573/0/857300a032", "title": "A Method of Equipment Disassembly Path Planning Based on Directed Constraint Graph Disassembly Sequence", "doi": null, "abstractUrl": "/proceedings-article/icceic/2020/857300a032/1rCgt6wzUbe", "parentPublication": { "id": "proceedings/icceic/2020/8573/0", "title": "2020 International Conference on Computer Engineering and Intelligent Control (ICCEIC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1jIxhEnA8IE", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "acronym": "vrw", "groupId": "1836626", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1jIxqx9VfS8", "doi": "10.1109/VRW50115.2020.00030", "title": "Pipeline of anatomical models generation. Experience of surgical planning and medical personal training by means of virtual environments and physical prototyping", "normalizedTitle": "Pipeline of anatomical models generation. Experience of surgical planning and medical personal training by means of virtual environments and physical prototyping", "abstract": "In medical practice, immersive experiences are rapidly expanding.The generation of 3D models depicting anatomical structures with medical relevance is necessary. Applying extended reality (XR) technologies, this work presents a pipeline to generate 3D content suitable to be used in a virtual environment or printed as a physical prototype, as an aid to the treatment of patients with complex congenital heart and tracheal diseases. We strive to provide a valuable tool to improve different aspects involved in the treatment of complicated medical cases, particularly, people training and surgical planning. The paper presents an example, a real medical case, that illustrates the process. Keywords: Virtual reality, 3D model, 3D printing, medical images, surgical planning", "abstracts": [ { "abstractType": "Regular", "content": "In medical practice, immersive experiences are rapidly expanding.The generation of 3D models depicting anatomical structures with medical relevance is necessary. Applying extended reality (XR) technologies, this work presents a pipeline to generate 3D content suitable to be used in a virtual environment or printed as a physical prototype, as an aid to the treatment of patients with complex congenital heart and tracheal diseases. 
We strive to provide a valuable tool to improve different aspects involved in the treatment of complicated medical cases, particularly, people training and surgical planning. The paper presents an example, a real medical case, that illustrates the process. Keywords: Virtual reality, 3D model, 3D printing, medical images, surgical planning", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In medical practice, immersive experiences are rapidly expanding.The generation of 3D models depicting anatomical structures with medical relevance is necessary. Applying extended reality (XR) technologies, this work presents a pipeline to generate 3D content suitable to be used in a virtual environment or printed as a physical prototype, as an aid to the treatment of patients with complex congenital heart and tracheal diseases. We strive to provide a valuable tool to improve different aspects involved in the treatment of complicated medical cases, particularly, people training and surgical planning. The paper presents an example, a real medical case, that illustrates the process. Keywords: Virtual reality, 3D model, 3D printing, medical images, surgical planning", "fno": "09090485", "keywords": [ "Three Dimensional Displays", "Solid Modeling", "Surgery", "Biomedical Imaging", "Pipelines", "Tools", "Planning", "Virtual Reality", "3 D Model", "3 D Printing", "Medical Images", "Surgical Planning", "Applied Computing", "Life And Medical Sciences", "Bioinformatics", "Computing Methodologies", "Computer Graphics", "Graphics Systems And Interfaces", "Virtual Reality", "Computing Methodologies", "Computer Graphics", "Image Manipulation", "Image Processing" ], "authors": [ { "affiliation": "Fundación Clínica Shaio", "fullName": "Carlos J. 
Latorre", "givenName": "Carlos J.", "surname": "Latorre", "__typename": "ArticleAuthorType" }, { "affiliation": "Fundación Clínica Shaio", "fullName": "Martha Lucia Velasco Morales", "givenName": "Martha Lucia Velasco", "surname": "Morales", "__typename": "ArticleAuthorType" }, { "affiliation": "Fundación Clínica Shaio", "fullName": "Victor Manuel Caicedo Ayerbe", "givenName": "Victor Manuel Caicedo", "surname": "Ayerbe", "__typename": "ArticleAuthorType" }, { "affiliation": "Fundación Clínica Shaio", "fullName": "Maria L. Arango", "givenName": "Maria L.", "surname": "Arango", "__typename": "ArticleAuthorType" }, { "affiliation": "Fundación Clínica Shaio", "fullName": "Diana Sofía Herrera-Valenzuela", "givenName": "Diana Sofía", "surname": "Herrera-Valenzuela", "__typename": "ArticleAuthorType" }, { "affiliation": "Fundación Clínica Shaio", "fullName": "Diana Patricia Romero Lara", "givenName": "Diana Patricia Romero", "surname": "Lara", "__typename": "ArticleAuthorType" }, { "affiliation": "Fundación Clínica Shaio", "fullName": "Leonardo Stiven Pardo Niño", "givenName": "Leonardo Stiven Pardo", "surname": "Niño", "__typename": "ArticleAuthorType" }, { "affiliation": "Fundación Clínica Shaio", "fullName": "Federico Javier Nuñez Ricardo", "givenName": "Federico Javier Nuñez", "surname": "Ricardo", "__typename": "ArticleAuthorType" }, { "affiliation": "Fundación Clínica Shaio", "fullName": "Fabián Cortés-Muñoz", "givenName": "Fabián", "surname": "Cortés-Muñoz", "__typename": "ArticleAuthorType" }, { "affiliation": "Multimedia Research Group, Universidad Militar Nueva Granada,Bogotá,Colombia", "fullName": "Wilson J. 
Sarmiento", "givenName": "Wilson J.", "surname": "Sarmiento", "__typename": "ArticleAuthorType" } ], "idPrefix": "vrw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-03-01T00:00:00", "pubType": "proceedings", "pages": "142-146", "year": "2020", "issn": null, "isbn": "978-1-7281-6532-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09090559", "articleId": "1jIxoACmybu", "__typename": "AdjacentArticleType" }, "next": { "fno": "09090505", "articleId": "1jIxoIQrIVq", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/svr/2014/4261/0/4261a287", "title": "A Gesture Control System for Aiding Surgical Procedures", "doi": null, "abstractUrl": "/proceedings-article/svr/2014/4261a287/12OmNzTYCb2", "parentPublication": { "id": "proceedings/svr/2014/4261/0", "title": "2014 XVI Symposium on Virtual and Augmented Reality (SVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbms/2014/4435/0/4435a551", "title": "Haptic System for Force-Profile Acquisition and Display for a Realistic Surgical Simulator", "doi": null, "abstractUrl": "/proceedings-article/cbms/2014/4435a551/12OmNzYNNae", "parentPublication": { "id": "proceedings/cbms/2014/4435/0", "title": "2014 IEEE 27th International Symposium on Computer-Based Medical Systems (CBMS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2013/05/mcg2013050048", "title": "The LiverAnatomyExplorer: A WebGL-Based Surgical Teaching Tool", "doi": null, "abstractUrl": "/magazine/cg/2013/05/mcg2013050048/13rRUNvgzct", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/1996/01/mcg1996010046", 
"title": "Assessing Craniofacial Surgical Simulation", "doi": null, "abstractUrl": "/magazine/cg/1996/01/mcg1996010046/13rRUy0ZzUT", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/var4good/2018/5977/0/08576884", "title": "Augmented Visual Instruction for Surgical Practice and Training", "doi": null, "abstractUrl": "/proceedings-article/var4good/2018/08576884/17D45WODasn", "parentPublication": { "id": "proceedings/var4good/2018/5977/0", "title": "2018 IEEE Workshop on Augmented and Virtual Realities for Good (VAR4Good)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icitbe/2021/0099/0/009900a304", "title": "Design and 3D Printing of Liver Surgical Guide Template Based on Mimics Liver Model Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/icitbe/2021/009900a304/1AH7MiebmZq", "parentPublication": { "id": "proceedings/icitbe/2021/0099/0", "title": "2021 International Conference on Information Technology and Biomedical Engineering (ICITBE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cisai/2021/0692/0/069200a430", "title": "Colorectal Tumour Segmentation Based on 3D-UNet", "doi": null, "abstractUrl": "/proceedings-article/cisai/2021/069200a430/1BmO1xhGEHm", "parentPublication": { "id": "proceedings/cisai/2021/0692/0", "title": "2021 International Conference on Computer Information Science and Artificial Intelligence (CISAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a410", "title": "Towards Virtual Teaching Hospitals for Advanced Surgical Training", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a410/1CJetJYrF3q", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference 
on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/annsim/2022/5288/0/09859317", "title": "Fully Automated Conversion Of Glioma Clinical MRI Scans Into A 3D Virtual Reality Model For Presurgical Planning", "doi": null, "abstractUrl": "/proceedings-article/annsim/2022/09859317/1G4ETqCyoV2", "parentPublication": { "id": "proceedings/annsim/2022/5288/0", "title": "2022 Annual Modeling and Simulation Conference (ANNSIM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vahc/2021/2067/0/206700a036", "title": "Phoenix Virtual Heart: A Hybrid VR-Desktop Visualization System for Cardiac Surgery Planning and Education", "doi": null, "abstractUrl": "/proceedings-article/vahc/2021/206700a036/1z0yljXrZhS", "parentPublication": { "id": "proceedings/vahc/2021/2067/0", "title": "2021 IEEE Workshop on Visual Analytics in Healthcare (VAHC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1jdDLJeFCBW", "title": "2019 International Conference on Computational Science and Computational Intelligence (CSCI)", "acronym": "csci", "groupId": "1803739", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1jdDLZmoQJa", "doi": "10.1109/CSCI49370.2019.00114", "title": "Collaborative Virtual Assembly Environment for Product Design", "normalizedTitle": "Collaborative Virtual Assembly Environment for Product Design", "abstract": "Collaborative virtual assembly environment is a vital computer-aided design tool in product design and can be used as a learning and training tool. It helps in supporting complex product design by enabling designers to collaborate and communicate with other designers involved in the product design. This paper proposes a collaborative virtual assembly environment built in two phases for the immersive and non-immersive environments. Phase one was developed in Unity 3D using Virtual Reality Toolkit (VRTK) and Steam VR. Whereas, phase two was built using Vizard and Vizible. This work aims to allow scientists and engineers to discuss the concept design in a real-time VR environment so that they can interact with the objects and review their work before it is deployed. This paper proposes the system architecture and describes the design and implementation of a collaborative virtual assembly environment. The outcome of this work is to be able to resolve communication and interaction problems that arise during the concept-design phase.", "abstracts": [ { "abstractType": "Regular", "content": "Collaborative virtual assembly environment is a vital computer-aided design tool in product design and can be used as a learning and training tool. It helps in supporting complex product design by enabling designers to collaborate and communicate with other designers involved in the product design. 
This paper proposes a collaborative virtual assembly environment built in two phases for the immersive and non-immersive environments. Phase one was developed in Unity 3D using Virtual Reality Toolkit (VRTK) and Steam VR. Whereas, phase two was built using Vizard and Vizible. This work aims to allow scientists and engineers to discuss the concept design in a real-time VR environment so that they can interact with the objects and review their work before it is deployed. This paper proposes the system architecture and describes the design and implementation of a collaborative virtual assembly environment. The outcome of this work is to be able to resolve communication and interaction problems that arise during the concept-design phase.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Collaborative virtual assembly environment is a vital computer-aided design tool in product design and can be used as a learning and training tool. It helps in supporting complex product design by enabling designers to collaborate and communicate with other designers involved in the product design. This paper proposes a collaborative virtual assembly environment built in two phases for the immersive and non-immersive environments. Phase one was developed in Unity 3D using Virtual Reality Toolkit (VRTK) and Steam VR. Whereas, phase two was built using Vizard and Vizible. This work aims to allow scientists and engineers to discuss the concept design in a real-time VR environment so that they can interact with the objects and review their work before it is deployed. This paper proposes the system architecture and describes the design and implementation of a collaborative virtual assembly environment. 
The outcome of this work is to be able to resolve communication and interaction problems that arise during the concept-design phase.", "fno": "558400a606", "keywords": [ "Assembling", "CAD", "Groupware", "Product Design", "Production Engineering Computing", "Virtual Reality", "Collaborative Virtual Assembly Environment", "Computer Aided Design Tool", "Complex Product Design", "Concept Design Phase", "Unity 3 D", "Virtual Reality Toolkit", "Steam VR", "Product Design", "Training Tool", "Collaboration", "Three Dimensional Displays", "Solid Modeling", "Training", "Virtual Environments", "Real Time Systems", "Virtual Reality Assembly System Collaborative Virtual Environment Computer Aided Design Collaborative Concept Design" ], "authors": [ { "affiliation": "Bowie State University", "fullName": "Sharad Sharma", "givenName": "Sharad", "surname": "Sharma", "__typename": "ArticleAuthorType" }, { "affiliation": "Bowie State University", "fullName": "Sri-Teja Bodempudi", "givenName": "Sri-Teja", "surname": "Bodempudi", "__typename": "ArticleAuthorType" }, { "affiliation": "Bowie State University", "fullName": "Manik Arrolla", "givenName": "Manik", "surname": "Arrolla", "__typename": "ArticleAuthorType" }, { "affiliation": "Equiskill Insights", "fullName": "Amit Upadhyay", "givenName": "Amit", "surname": "Upadhyay", "__typename": "ArticleAuthorType" } ], "idPrefix": "csci", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-12-01T00:00:00", "pubType": "proceedings", "pages": "606-611", "year": "2019", "issn": null, "isbn": "978-1-7281-5584-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "558400a600", "articleId": "1jdDMJqwHwk", "__typename": "AdjacentArticleType" }, "next": { "fno": "558400a612", "articleId": "1jdE1CpwVkQ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": 
"proceedings/icalt/2018/6049/0/604901a395", "title": "Manual Assembly Training in Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/icalt/2018/604901a395/12OmNBKEyqp", "parentPublication": { "id": "proceedings/icalt/2018/6049/0", "title": "2018 IEEE 18th International Conference on Advanced Learning Technologies (ICALT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cscwd/2005/0002/1/01504150", "title": "Virtual assembly and tolerance analysis for collaborative design", "doi": null, "abstractUrl": "/proceedings-article/cscwd/2005/01504150/12OmNBVIUzS", "parentPublication": { "id": "proceedings/cscwd/2005/0002/1", "title": "International Conference on Computer Supported Cooperative Work in Design", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icess/2005/2512/0/25120441", "title": "A Virtual Environment for Collaborative Assembly", "doi": null, "abstractUrl": "/proceedings-article/icess/2005/25120441/12OmNBpmDD5", "parentPublication": { "id": "proceedings/icess/2005/2512/0", "title": "ICESS 2005. 
Second International Conference on Embedded Software and Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vs-games/2017/5812/0/08056613", "title": "An immersive virtual environment for collaborative geovisualization", "doi": null, "abstractUrl": "/proceedings-article/vs-games/2017/08056613/12OmNvCi45l", "parentPublication": { "id": "proceedings/vs-games/2017/5812/0", "title": "2017 9th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isatp/2003/7770/0/01217192", "title": "Experiments of assembly planning in virtual environment", "doi": null, "abstractUrl": "/proceedings-article/isatp/2003/01217192/12OmNwErpDQ", "parentPublication": { "id": "proceedings/isatp/2003/7770/0", "title": "ISATP'03: 5th IEEE International Symposium on Assembly and Task Planning", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iscst/2005/2387/0/01553320", "title": "Ontology-based virtual assembly model for collaborative virtual prototyping and simulation", "doi": null, "abstractUrl": "/proceedings-article/iscst/2005/01553320/12OmNy4r3WH", "parentPublication": { "id": "proceedings/iscst/2005/2387/0", "title": "2005 International Symposium on Collaborative Technologies and Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dcve/2014/5217/0/07160930", "title": "Collaborative virtual environments for ergonomics: embedding the design engineer role in the loop", "doi": null, "abstractUrl": "/proceedings-article/3dcve/2014/07160930/12OmNyen1vD", "parentPublication": { "id": "proceedings/3dcve/2014/5217/0", "title": "2014 International Workshop on Collaborative Virtual Environments (3DCVE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/vr/2018/3365/0/08446508", "title": "Evaluating the Effectiveness of Head-Mounted Display Virtual Reality (HMD VR) Environment on Students' Learning for a Virtual Collaborative Engineering Assembly Task", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446508/13bd1rsER1o", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798289", "title": "The Collaborative Virtual Reality Neurorobotics Lab", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798289/1cJ0Q1IA4Pm", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2021/4065/0/406500a093", "title": "A System for Collaborative Assembly Simulation and User Performance Analysis", "doi": null, "abstractUrl": "/proceedings-article/cw/2021/406500a093/1yBF4m4ghGg", "parentPublication": { "id": "proceedings/cw/2021/4065/0", "title": "2021 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1yBEZe3hqyQ", "title": "2021 International Conference on Cyberworlds (CW)", "acronym": "cw", "groupId": "1000175", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1yBF37laMN2", "doi": "10.1109/CW52790.2021.00012", "title": "Experimental Evaluation of Virtual Pottery Systems", "normalizedTitle": "Experimental Evaluation of Virtual Pottery Systems", "abstract": "Virtual reality and creative fabrication tools are still considered a new type of human-computer interface. The powerful technology opens an opportunity for artistic creativity, easy to use, enjoyable and educational. Today, Virtual pottery technical developments augment more engagement with interaction devices. Virtual Reality is evolving the physical/visual interaction concepts as a more reliable robust method, with further advanced visualisation and complex modelling approach. This approach forms a new art line by contributing to communities, museums, exhibitions, and projects using multiple practice methods and mediums. However, integrating various creative tools such as VR apps, 3D modelling software, or devices suitable for modelling and rapid prototyping applications is becoming more difficult due to the rapid change in the development of the component technologies. We present a set of evaluation methods to be considered for our system. Our contribution optimises the outcomes in terms of usability for our novel Virtual Pottery system, configuring traditional pottery modelling, utilising a combination of virtual, 3D modelling and fabrication tools.", "abstracts": [ { "abstractType": "Regular", "content": "Virtual reality and creative fabrication tools are still considered a new type of human-computer interface. The powerful technology opens an opportunity for artistic creativity, easy to use, enjoyable and educational. Today, Virtual pottery technical developments augment more engagement with interaction devices. 
Virtual Reality is evolving the physical/visual interaction concepts as a more reliable robust method, with further advanced visualisation and complex modelling approach. This approach forms a new art line by contributing to communities, museums, exhibitions, and projects using multiple practice methods and mediums. However, integrating various creative tools such as VR apps, 3D modelling software, or devices suitable for modelling and rapid prototyping applications is becoming more difficult due to the rapid change in the development of the component technologies. We present a set of evaluation methods to be considered for our system. Our contribution optimises the outcomes in terms of usability for our novel Virtual Pottery system, configuring traditional pottery modelling, utilising a combination of virtual, 3D modelling and fabrication tools.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Virtual reality and creative fabrication tools are still considered a new type of human-computer interface. The powerful technology opens an opportunity for artistic creativity, easy to use, enjoyable and educational. Today, Virtual pottery technical developments augment more engagement with interaction devices. Virtual Reality is evolving the physical/visual interaction concepts as a more reliable robust method, with further advanced visualisation and complex modelling approach. This approach forms a new art line by contributing to communities, museums, exhibitions, and projects using multiple practice methods and mediums. However, integrating various creative tools such as VR apps, 3D modelling software, or devices suitable for modelling and rapid prototyping applications is becoming more difficult due to the rapid change in the development of the component technologies. We present a set of evaluation methods to be considered for our system. 
Our contribution optimises the outcomes in terms of usability for our novel Virtual Pottery system, configuring traditional pottery modelling, utilising a combination of virtual, 3D modelling and fabrication tools.", "fno": "406500a025", "keywords": [ "Art", "Data Visualisation", "Pottery", "Rapid Prototyping Industrial", "Solid Modelling", "Virtual Reality", "Creative Fabrication Tools", "Human Computer Interface", "Powerful Technology", "Artistic Creativity", "Interaction Devices", "Virtual Reality", "Reliable Robust Method", "Advanced Visualisation", "Complex Modelling Approach", "Art Line", "Multiple Practice Methods", "Creative Tools", "Rapid Prototyping Applications", "Component Technologies", "Evaluation Methods", "Traditional Pottery Modelling", "Experimental Evaluation", "Virtual Pottery Systems", "Virtual Pottery Technical Developments", "VR Apps", "3 D Modelling Software", "Fabrication", "Deformable Models", "Solid Modeling", "Visualization", "Three Dimensional Displays", "Virtual Reality", "Tools", "Virtual Pottery", "Usability", "Evaluation", "Interaction", "Methods", "Creative Technology" ], "authors": [ { "affiliation": "Cardiff Metropolitan University,Cardiff,United Kingdom", "fullName": "Sarah Dashti", "givenName": "Sarah", "surname": "Dashti", "__typename": "ArticleAuthorType" }, { "affiliation": "Pontificia Universidad Javeriana,Cali,Colombia", "fullName": "AA Navarro-Newball", "givenName": "AA", "surname": "Navarro-Newball", "__typename": "ArticleAuthorType" }, { "affiliation": "Cardiff Metropolitan University,Cardiff,United Kingdom", "fullName": "Edmond Prakash", "givenName": "Edmond", "surname": "Prakash", "__typename": "ArticleAuthorType" }, { "affiliation": "Cardiff Metropolitan University,Cardiff,United Kingdom", "fullName": "Fiaz Hussain", "givenName": "Fiaz", "surname": "Hussain", "__typename": "ArticleAuthorType" }, { "affiliation": "Cardiff Metropolitan University,Cardiff,United Kingdom", "fullName": "Fiona Carroll", "givenName": 
"Fiona", "surname": "Carroll", "__typename": "ArticleAuthorType" } ], "idPrefix": "cw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-09-01T00:00:00", "pubType": "proceedings", "pages": "25-32", "year": "2021", "issn": null, "isbn": "978-1-6654-4065-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "406500a017", "articleId": "1yBEZLFBWGA", "__typename": "AdjacentArticleType" }, "next": { "fno": "406500a033", "articleId": "1yBF6AyeY7K", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iv/2010/7846/0/05571128", "title": "The Impact of Immersive Virtual Reality on Visualisation for a Design Review in Construction", "doi": null, "abstractUrl": "/proceedings-article/iv/2010/05571128/12OmNxvwp2w", "parentPublication": { "id": "proceedings/iv/2010/7846/0", "title": "2010 14th International Conference Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgi/2004/2171/0/21710476", "title": "Flat3D: A Shared Virtual 3D World Grown by Creative Activities and Communication through the Network", "doi": null, "abstractUrl": "/proceedings-article/cgi/2004/21710476/12OmNzmclk4", "parentPublication": { "id": "proceedings/cgi/2004/2171/0", "title": "Proceedings. 
Computer Graphics International", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2018/02/mcg2018020074", "title": "PotteryGo: A Virtual Pottery Making Training System", "doi": null, "abstractUrl": "/magazine/cg/2018/02/mcg2018020074/13rRUxZRbrW", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icisce/2018/5500/0/550000a538", "title": "Subjective and Objective Comprehensive Evaluation of Cockpit Operation Efficiency Based on Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/icisce/2018/550000a538/17D45WHONqc", "parentPublication": { "id": "proceedings/icisce/2018/5500/0", "title": "2018 5th International Conference on Information Science and Control Engineering (ICISCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dsc/2022/7480/0/748000a588", "title": "Application of Virtual Simulation in Digital Exhibition and Performance", "doi": null, "abstractUrl": "/proceedings-article/dsc/2022/748000a588/1H44wuI9nRm", "parentPublication": { "id": "proceedings/dsc/2022/7480/0", "title": "2022 7th IEEE International Conference on Data Science in Cyberspace (DSC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2022/05/09910486", "title": "Role of Intricate Pottery Visualization in Ceramic Manufacturing", "doi": null, "abstractUrl": "/magazine/cg/2022/05/09910486/1HcjzG8YN6E", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cscloud-edgecom/2020/6550/0/09170994", "title": "Design and Implementation of Virtual Pottery Space Based on Ceramic Cloud Service Platform", "doi": null, "abstractUrl": 
"/proceedings-article/cscloud-edgecom/2020/09170994/1mqcvogUF1u", "parentPublication": { "id": "proceedings/cscloud-edgecom/2020/6550/0", "title": "2020 7th IEEE International Conference on Cyber Security and Cloud Computing (CSCloud)/2020 6th IEEE International Conference on Edge Computing and Scalable Cloud (EdgeCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2020/6497/0/649700a133", "title": "Virtual Pottery: Deformable Sound Shape Modelling and Fabrication", "doi": null, "abstractUrl": "/proceedings-article/cw/2020/649700a133/1olHzUH6vQI", "parentPublication": { "id": "proceedings/cw/2020/6497/0", "title": "2020 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/12/09483619", "title": "Virtual Replicas of Real Places: Experimental Investigations", "doi": null, "abstractUrl": "/journal/tg/2022/12/09483619/1vcJrTJdq3m", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2020/0497/0/049700a352", "title": "Research on Virtual Pottery Teaching System", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2020/049700a352/1vg7UjsrmDK", "parentPublication": { "id": "proceedings/icvrv/2020/0497/0", "title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1zWE36wtuCY", "title": "2021 International Conference on 3D Vision (3DV)", "acronym": "3dv", "groupId": "1800494", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1zWEcRW1WWk", "doi": "10.1109/3DV53792.2021.00137", "title": "High Fidelity 3D Reconstructions with Limited Physical Views", "normalizedTitle": "High Fidelity 3D Reconstructions with Limited Physical Views", "abstract": "Multi-view triangulation is the gold standard for 3D reconstruction from 2D correspondences given known calibration and sufficient views. However in practice, expensive multi-view setups – involving tens sometimes hundreds of cameras – are required in order to obtain the high fidelity 3D reconstructions necessary for many modern applications. In this paper we present a novel approach that leverages recent advances in 2D-3D lifting using neural shape priors while also enforcing multi-view equivariance. We show how our method can achieve comparable fidelity to expensive calibrated multi-view rigs using a limited (2-3) number of uncalibrated camera views.", "abstracts": [ { "abstractType": "Regular", "content": "Multi-view triangulation is the gold standard for 3D reconstruction from 2D correspondences given known calibration and sufficient views. However in practice, expensive multi-view setups – involving tens sometimes hundreds of cameras – are required in order to obtain the high fidelity 3D reconstructions necessary for many modern applications. In this paper we present a novel approach that leverages recent advances in 2D-3D lifting using neural shape priors while also enforcing multi-view equivariance. 
We show how our method can achieve comparable fidelity to expensive calibrated multi-view rigs using a limited (2-3) number of uncalibrated camera views.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Multi-view triangulation is the gold standard for 3D reconstruction from 2D correspondences given known calibration and sufficient views. However in practice, expensive multi-view setups – involving tens sometimes hundreds of cameras – are required in order to obtain the high fidelity 3D reconstructions necessary for many modern applications. In this paper we present a novel approach that leverages recent advances in 2D-3D lifting using neural shape priors while also enforcing multi-view equivariance. We show how our method can achieve comparable fidelity to expensive calibrated multi-view rigs using a limited (2-3) number of uncalibrated camera views.", "fno": "268800b301", "keywords": [ "Calibration", "Image Reconstruction", "Neural Nets", "Stereo Image Processing", "High Fidelity 3 D Reconstructions", "Physical Views", "Multiview Triangulation", "Gold Standard", "Calibration", "Sufficient Views", "Multiview Equivariance", "Uncalibrated Camera Views", "Calibrated Multiview Rigs", "2 D Correspondences", "2 D 3 D Lifting", "Neural Shape Priors", "Three Dimensional Displays", "Shape", "Cameras", "Calibration", "Multi View Geometry", "3 D Reconstruction", "Deep Learning", "Optimization", "3 D Lifting", "Neural Prior" ], "authors": [ { "affiliation": "Carnegie Mellon University", "fullName": "Mosam Dabhi", "givenName": "Mosam", "surname": "Dabhi", "__typename": "ArticleAuthorType" }, { "affiliation": "Carnegie Mellon University", "fullName": "Chaoyang Wang", "givenName": "Chaoyang", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "Apple Inc", "fullName": "Kunal Saluja", "givenName": "Kunal", "surname": "Saluja", "__typename": "ArticleAuthorType" }, { "affiliation": "Carnegie Mellon University", "fullName": "László A. 
Jeni", "givenName": "László A.", "surname": "Jeni", "__typename": "ArticleAuthorType" }, { "affiliation": "Apple Inc", "fullName": "Ian Fasel", "givenName": "Ian", "surname": "Fasel", "__typename": "ArticleAuthorType" }, { "affiliation": "Carnegie Mellon University", "fullName": "Simon Lucey", "givenName": "Simon", "surname": "Lucey", "__typename": "ArticleAuthorType" } ], "idPrefix": "3dv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-12-01T00:00:00", "pubType": "proceedings", "pages": "1301-1311", "year": "2021", "issn": null, "isbn": "978-1-6654-2688-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "268800b290", "articleId": "1zWEoi7ehZS", "__typename": "AdjacentArticleType" }, "next": { "fno": "268800b312", "articleId": "1zWE5On5lmg", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2014/5118/0/5118b486", "title": "Photometric Bundle Adjustment for Dense Multi-view 3D Modeling", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2014/5118b486/12OmNBp52wC", "parentPublication": { "id": "proceedings/cvpr/2014/5118/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2014/7000/1/7000a665", "title": "A Real-Time View-Dependent Shape Optimization for High Quality Free-Viewpoint Rendering of 3D Video", "doi": null, "abstractUrl": "/proceedings-article/3dv/2014/7000a665/12OmNzdoMQZ", "parentPublication": { "id": "proceedings/3dv/2014/7000/2", "title": "2014 2nd International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200f600", "title": "H3D-Net: Few-Shot High-Fidelity 3D Head Reconstruction", "doi": 
null, "abstractUrl": "/proceedings-article/iccv/2021/281200f600/1BmG0cJclQA", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2023/04/09852285", "title": "DSGN++: Exploiting Visual-Spatial Relation for Stereo-Based 3D Detectors", "doi": null, "abstractUrl": "/journal/tp/2023/04/09852285/1FFHaT3cyze", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600i625", "title": "Differentiable Stereopsis: Meshes from multiple views using differentiable rendering", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600i625/1H0NABVhMdO", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2020/1331/0/09102812", "title": "Low-Frequency Guided Self-Supervised Learning For High-Fidelity 3d Face Reconstruction In The Wild", "doi": null, "abstractUrl": "/proceedings-article/icme/2020/09102812/1kwr0vb7YsM", "parentPublication": { "id": "proceedings/icme/2020/1331/0", "title": "2020 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800f890", "title": "Towards High-Fidelity 3D Face Reconstruction From In-the-Wild Images Using Graph Convolutional Networks", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800f890/1m3nIM46toI", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition 
(CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a064", "title": "The Effects of Object Shape, Fidelity, Color, and Luminance on Depth Perception in Handheld Mobile Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a064/1pysxPMqyTm", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2021/1952/0/09466267", "title": "Spectral MVIR: Joint Reconstruction of 3D Shape and Spectral Reflectance", "doi": null, "abstractUrl": "/proceedings-article/iccp/2021/09466267/1uSSWr7wnkY", "parentPublication": { "id": "proceedings/iccp/2021/1952/0", "title": "2021 IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2021/2688/0/268800a494", "title": "Data-Driven 3D Reconstruction of Dressed Humans From Sparse Views", "doi": null, "abstractUrl": "/proceedings-article/3dv/2021/268800a494/1zWE8lvtPIA", "parentPublication": { "id": "proceedings/3dv/2021/2688/0", "title": "2021 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAZOJTa", "title": "Point-Based Graphics 2005", "acronym": "pbg", "groupId": "1002154", "volume": "0", "displayVolume": "0", "year": "2005", "__typename": "ProceedingType" }, "article": { "id": "12OmNCdTeQ0", "doi": "10.1109/PBG.2005.194069", "title": "A practical structured light acquisition system for point-based geometry and texture", "normalizedTitle": "A practical structured light acquisition system for point-based geometry and texture", "abstract": "We present a simple and high-quality 3D scanning system based on structured light. It uses the common setup of a video projector, a computer-controlled turntable and a single camera. Geometry is acquired using a combination of gray code and phase-shift projections, and it is stored and processed in a point-based representation. We achieve high accuracy by careful calibration of camera, projector, and turntable axis. In addition, we make use of the projector's calibration and extend it to a calibrated light source, allowing for a simple reconstruction of material properties for each surface point. We alternatively use a Lambertian reflectance model, or fit a Phong reflectance model to the samples under different turntable orientations. The acquisition pipeline is entirely point-based, avoiding the need of triangulation during all processing stages.", "abstracts": [ { "abstractType": "Regular", "content": "We present a simple and high-quality 3D scanning system based on structured light. It uses the common setup of a video projector, a computer-controlled turntable and a single camera. Geometry is acquired using a combination of gray code and phase-shift projections, and it is stored and processed in a point-based representation. We achieve high accuracy by careful calibration of camera, projector, and turntable axis. 
In addition, we make use of the projector's calibration and extend it to a calibrated light source, allowing for a simple reconstruction of material properties for each surface point. We alternatively use a Lambertian reflectance model, or fit a Phong reflectance model to the samples under different turntable orientations. The acquisition pipeline is entirely point-based, avoiding the need of triangulation during all processing stages.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a simple and high-quality 3D scanning system based on structured light. It uses the common setup of a video projector, a computer-controlled turntable and a single camera. Geometry is acquired using a combination of gray code and phase-shift projections, and it is stored and processed in a point-based representation. We achieve high accuracy by careful calibration of camera, projector, and turntable axis. In addition, we make use of the projector's calibration and extend it to a calibrated light source, allowing for a simple reconstruction of material properties for each surface point. We alternatively use a Lambertian reflectance model, or fit a Phong reflectance model to the samples under different turntable orientations. The acquisition pipeline is entirely point-based, avoiding the need of triangulation during all processing stages.", "fno": "01500323", "keywords": [ "Image Texture", "Computer Vision", "Computational Geometry", "Cameras", "Calibration", "Light Sources", "3 D Scanning System", "Structured Light Acquisition System", "Point Based Geometry", "Camera Calibration", "Projector Calibration", "Turntable Axis", "Lambertian Reflectance Model", "Phong Reflectance Model", "Image Processing", "Computer Vision", "Image Texture", "Geometry", "Cameras", "Calibration", "Reflectivity", "Reflective Binary Codes", "Light Sources", "Material Properties", "Surface Fitting", "Surface Reconstruction", "Pipelines" ], "authors": [ { "affiliation": "Dept. 
of Comput. Sci., Swiss Fed. Inst. of Technol., Zurich, Switzerland", "fullName": "F. Sadlo", "givenName": "F.", "surname": "Sadlo", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Comput. Sci., Swiss Fed. Inst. of Technol., Zurich, Switzerland", "fullName": "T. Weyrich", "givenName": "T.", "surname": "Weyrich", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Comput. Sci., Swiss Fed. Inst. of Technol., Zurich, Switzerland", "fullName": "R. Peikert", "givenName": "R.", "surname": "Peikert", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Comput. Sci., Swiss Fed. Inst. of Technol., Zurich, Switzerland", "fullName": "M. Gross", "givenName": "M.", "surname": "Gross", "__typename": "ArticleAuthorType" } ], "idPrefix": "pbg", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2005-03-01T00:00:00", "pubType": "proceedings", "pages": "89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145", "year": "2005", "issn": "1511-7813", "isbn": "3-905673-20-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "01500322", "articleId": "12OmNrkT7zM", "__typename": "AdjacentArticleType" }, "next": { "fno": "01500324", "articleId": "12OmNwF0BOt", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2009/3992/0/05206764", "title": "Illumination and spatially varying specular reflectance from a single view", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2009/05206764/12OmNARRYl8", "parentPublication": { "id": "proceedings/cvpr/2009/3992/0", "title": "2009 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/wacv/2013/5053/0/06475040", "title": "A full-spherical device for simultaneous geometry and reflectance acquisition", "doi": null, "abstractUrl": "/proceedings-article/wacv/2013/06475040/12OmNBSSV8O", "parentPublication": { "id": "proceedings/wacv/2013/5053/0", "title": "Applications of Computer Vision, IEEE Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2011/0529/0/05981781", "title": "Simultaneous self-calibration of a projector and a camera using structured light", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2011/05981781/12OmNBzRNuv", "parentPublication": { "id": "proceedings/cvprw/2011/0529/0", "title": "CVPR 2011 WORKSHOPS", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dpvt/2006/2825/0/282500931", "title": "A Structured Light Range Imaging System Using a Moving Correlation Code", "doi": null, "abstractUrl": "/proceedings-article/3dpvt/2006/282500931/12OmNqyUUBp", "parentPublication": { "id": "proceedings/3dpvt/2006/2825/0", "title": "Third International Symposium on 3D Data Processing, Visualization, and Transmission (3DPVT'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2012/1611/0/06239347", "title": "A kaleidoscopic approach to surround geometry and reflectance acquisition", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2012/06239347/12OmNs0kyBg", "parentPublication": { "id": "proceedings/cvprw/2012/1611/0", "title": "2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/06977412", "title": "High-Coverage 3D Scanning through Online Structured Light Calibration", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/06977412/12OmNzXFoDb", 
"parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2000/0750/1/07501778", "title": "Range Image Acquisition of Objects with Non-Uniform Albedo Using Structured Light Range Sensor", "doi": null, "abstractUrl": "/proceedings-article/icpr/2000/07501778/12OmNzaQocJ", "parentPublication": { "id": "proceedings/icpr/2000/0750/1", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/03/08827959", "title": "Robust Reflectance Estimation for Projection-Based Appearance Control in a Dynamic Light Environment", "doi": null, "abstractUrl": "/journal/tg/2021/03/08827959/1ddblRDPV8Q", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aemcse/2021/1596/0/159600a336", "title": "Research on 360° 3D point cloud reconstruction technology based on turntable and line structured light stereo vision", "doi": null, "abstractUrl": "/proceedings-article/aemcse/2021/159600a336/1wcdmqMmW4g", "parentPublication": { "id": "proceedings/aemcse/2021/1596/0", "title": "2021 4th International Conference on Advanced Electronic Materials, Computers and Software Engineering (AEMCSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/11/09523844", "title": "Directionally Decomposing Structured Light for Projector Calibration", "doi": null, "abstractUrl": "/journal/tg/2021/11/09523844/1wpqmnzDSzm", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] 
}
{ "proceeding": { "id": "12OmNAolGQA", "title": "2016 IEEE Applied Imagery Pattern Recognition Workshop (AIPR)", "acronym": "aipr", "groupId": "1000046", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNrJiCPx", "doi": "10.1109/AIPR.2016.8010586", "title": "Auto-calibration of multi-projector systems on arbitrary shapes", "normalizedTitle": "Auto-calibration of multi-projector systems on arbitrary shapes", "abstract": "In this paper we present a new distributed technique to geometrically calibrate multiple casually aligned projectors on a fiducial free arbitrary surface using multiple casually aligned cameras where every point of the surface is seen by at least one camera. Using a multi-step method that uses binary blob patterns, we estimate robustly the display's 3D surface geometry, the cameras' extrinsic parameters, and the intrinsic and extrinsic parameters of the multiple projectors. Thus, our work can enable easy deployment of large scale augmented reality environments playing a fundamental role in increasing their popularity in several applications like geospatial analysis, architectural lighting, cultural heritage restoration, theatrical lighting, training, simulation and visualization.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper we present a new distributed technique to geometrically calibrate multiple casually aligned projectors on a fiducial free arbitrary surface using multiple casually aligned cameras where every point of the surface is seen by at least one camera. Using a multi-step method that uses binary blob patterns, we estimate robustly the display's 3D surface geometry, the cameras' extrinsic parameters, and the intrinsic and extrinsic parameters of the multiple projectors. 
Thus, our work can enable easy deployment of large scale augmented reality environments playing a fundamental role in increasing their popularity in several applications like geospatial analysis, architectural lighting, cultural heritage restoration, theatrical lighting, training, simulation and visualization.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper we present a new distributed technique to geometrically calibrate multiple casually aligned projectors on a fiducial free arbitrary surface using multiple casually aligned cameras where every point of the surface is seen by at least one camera. Using a multi-step method that uses binary blob patterns, we estimate robustly the display's 3D surface geometry, the cameras' extrinsic parameters, and the intrinsic and extrinsic parameters of the multiple projectors. Thus, our work can enable easy deployment of large scale augmented reality environments playing a fundamental role in increasing their popularity in several applications like geospatial analysis, architectural lighting, cultural heritage restoration, theatrical lighting, training, simulation and visualization.", "fno": "08010586", "keywords": [ "Cameras", "Geometry", "Three Dimensional Displays", "Calibration", "Surface Reconstruction", "Shape", "Geospatial Analysis" ], "authors": [ { "affiliation": "Computer Science Department, University of California Irvine", "fullName": "Mahdi Abbaspour Tehrani", "givenName": "Mahdi Abbaspour", "surname": "Tehrani", "__typename": "ArticleAuthorType" }, { "affiliation": "Computer Science Department, University of California Irvine", "fullName": "M. 
Gopi", "givenName": "M.", "surname": "Gopi", "__typename": "ArticleAuthorType" }, { "affiliation": "Computer Science Department, University of California Irvine", "fullName": "Aditi Majumder", "givenName": "Aditi", "surname": "Majumder", "__typename": "ArticleAuthorType" } ], "idPrefix": "aipr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2016-10-01T00:00:00", "pubType": "proceedings", "pages": "1-3", "year": "2016", "issn": "2332-5615", "isbn": "978-1-5090-3284-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08010585", "articleId": "12OmNBBQZrr", "__typename": "AdjacentArticleType" }, "next": { "fno": "08010587", "articleId": "12OmNzTYC2l", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2015/1727/0/07223400", "title": "Semi-automatic calibration of a projector-camera system using arbitrary objects with known geometry", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223400/12OmNBJw9RK", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2011/0529/0/05981726", "title": "Fully automatic multi-projector calibration with an uncalibrated camera", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2011/05981726/12OmNBSBk4F", "parentPublication": { "id": "proceedings/cvprw/2011/0529/0", "title": "CVPR 2011 WORKSHOPS", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2004/2128/1/212810014", "title": "Auto-Calibration of Multi-Projector Display Walls", "doi": null, "abstractUrl": "/proceedings-article/icpr/2004/212810014/12OmNCb3fwi", "parentPublication": { "id": "proceedings/icpr/2004/2128/1", "title": "Pattern Recognition, 
International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2011/348/0/06011885", "title": "Novel projector calibration approaches of multi-resolution display", "doi": null, "abstractUrl": "/proceedings-article/icme/2011/06011885/12OmNCd2rEL", "parentPublication": { "id": "proceedings/icme/2011/348/0", "title": "2011 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2009/3994/0/05204317", "title": "Geometric video projector auto-calibration", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2009/05204317/12OmNCxtyKC", "parentPublication": { "id": "proceedings/cvprw/2009/3994/0", "title": "2009 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2010/6237/0/05444797", "title": "Auto-calibration of cylindrical multi-projector systems", "doi": null, "abstractUrl": "/proceedings-article/vr/2010/05444797/12OmNviHKkd", "parentPublication": { "id": "proceedings/vr/2010/6237/0", "title": "2010 IEEE Virtual Reality Conference (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2017/1034/0/1034a815", "title": "Efficient Separation Between Projected Patterns for Multiple Projector 3D People Scanning", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2017/1034a815/12OmNwcCINM", "parentPublication": { "id": "proceedings/iccvw/2017/1034/0", "title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/11/07164353", "title": "On-Site Semi-Automatic Calibration and Registration of a Projector-Camera System Using Arbitrary Objects with Known Geometry", "doi": null, 
"abstractUrl": "/journal/tg/2015/11/07164353/13rRUEgs2M6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/11/08466021", "title": "Auto-Calibration for Dynamic Multi-Projection Mapping on Arbitrary Surfaces", "doi": null, "abstractUrl": "/journal/tg/2018/11/08466021/14M3DYlzziw", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/04/08889677", "title": "Automated Geometric Registration for Multi-Projector Displays on Arbitrary 3D Shapes Using Uncalibrated Devices", "doi": null, "abstractUrl": "/journal/tg/2021/04/08889677/1eBugxXEgLe", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyKJiaV", "title": "Pattern Recognition, International Conference on", "acronym": "icpr", "groupId": "1000545", "volume": "0", "displayVolume": "0", "year": "2010", "__typename": "ProceedingType" }, "article": { "id": "12OmNs0C9zQ", "doi": "10.1109/ICPR.2010.84", "title": "Adaptive Image Projection onto Non-planar Screen Using Projector-Camera Systems", "normalizedTitle": "Adaptive Image Projection onto Non-planar Screen Using Projector-Camera Systems", "abstract": "In this paper, we propose a method for projecting images onto non-planar screens by using projector-camera systems eliminating distortion in projected images. In this system, point-to-point correspondences in a projector image and a camera image should be extracted. For finding correspondences, the epipolar geometry between a projector and a camera is used. By using dynamic programming method on epipolar lines, correspondences between projector image and camera image are obtained. Furthermore, in order to achieve faster and more robust matching, the non-planar screen is approximately represented by a B-spline surface. The small number of parameters for the B-spline surface are estimated from corresponding pixels on epipolar lines rapidly. Experimental results show the proposed method works well for projecting images onto non-planar screens.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we propose a method for projecting images onto non-planar screens by using projector-camera systems eliminating distortion in projected images. In this system, point-to-point correspondences in a projector image and a camera image should be extracted. For finding correspondences, the epipolar geometry between a projector and a camera is used. By using dynamic programming method on epipolar lines, correspondences between projector image and camera image are obtained. 
Furthermore, in order to achieve faster and more robust matching, the non-planar screen is approximately represented by a B-spline surface. The small number of parameters for the B-spline surface are estimated from corresponding pixels on epipolar lines rapidly. Experimental results show the proposed method works well for projecting images onto non-planar screens.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we propose a method for projecting images onto non-planar screens by using projector-camera systems eliminating distortion in projected images. In this system, point-to-point correspondences in a projector image and a camera image should be extracted. For finding correspondences, the epipolar geometry between a projector and a camera is used. By using dynamic programming method on epipolar lines, correspondences between projector image and camera image are obtained. Furthermore, in order to achieve faster and more robust matching, the non-planar screen is approximately represented by a B-spline surface. The small number of parameters for the B-spline surface are estimated from corresponding pixels on epipolar lines rapidly. 
Experimental results show the proposed method works well for projecting images onto non-planar screens.", "fno": "4109a307", "keywords": [ "Projector Camera System", "Non Planar Screen", "Epipolar Geometry", "B Spline Surface" ], "authors": [ { "affiliation": null, "fullName": "Takashi Yamanaka", "givenName": "Takashi", "surname": "Yamanaka", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Fumihiko Sakaue", "givenName": "Fumihiko", "surname": "Sakaue", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jun Sato", "givenName": "Jun", "surname": "Sato", "__typename": "ArticleAuthorType" } ], "idPrefix": "icpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2010-08-01T00:00:00", "pubType": "proceedings", "pages": "307-310", "year": "2010", "issn": "1051-4651", "isbn": "978-0-7695-4109-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4109a302", "articleId": "12OmNxGAKUS", "__typename": "AdjacentArticleType" }, "next": { "fno": "4109a311", "articleId": "12OmNyL0Tiu", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2001/1272/2/127220504", "title": "A Self-Correcting Projector", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2001/127220504/12OmNB8Cj43", "parentPublication": { "id": "proceedings/cvpr/2001/1272/2", "title": "Proceedings of the 2001 IEEE Computer Society Conference on Computer Vision and Pattern Recognition. 
CVPR 2001", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isdea/2012/4608/0/4608b285", "title": "Research of Color Correction Algorithm for Multi-projector Screen Based on Projector-Camera System", "doi": null, "abstractUrl": "/proceedings-article/isdea/2012/4608b285/12OmNxwENpp", "parentPublication": { "id": "proceedings/isdea/2012/4608/0", "title": "2012 Second International Conference on Intelligent System Design and Engineering Application", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2000/0750/3/07503815", "title": "A Novel Method for Camera Planar Motion Detection and Robust Estimation of the 1D Trifocal Tensor", "doi": null, "abstractUrl": "/proceedings-article/icpr/2000/07503815/12OmNxwncDf", "parentPublication": { "id": "proceedings/icpr/2000/0750/3", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2002/1695/3/169530676", "title": "Recovering Structures and Motions from Mutual Projection of Cameras", "doi": null, "abstractUrl": "/proceedings-article/icpr/2002/169530676/12OmNywfKz2", "parentPublication": { "id": "proceedings/icpr/2002/1695/3", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2004/2158/2/01315159", "title": "A flexible projector-camera system for multi-planar displays", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2004/01315159/12OmNzBwGyN", "parentPublication": { "id": "proceedings/cvpr/2004/2158/2", "title": "Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. 
CVPR 2004.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109a320", "title": "Active Calibration of Camera-Projector Systems Based on Planar Homography", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109a320/12OmNzDehgc", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460078", "title": "Calibration-free projector-camera system for spatial augmented reality on planar surfaces", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460078/12OmNzUxO4G", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccet/2009/3521/1/3521a462", "title": "A Novel Binary Code Based Projector-Camera System Registration Method", "doi": null, "abstractUrl": "/proceedings-article/iccet/2009/3521a462/12OmNzYwcew", "parentPublication": { "id": null, "title": null, "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2005/12/i1845", "title": "Autocalibration of a Projector-Camera System", "doi": null, "abstractUrl": "/journal/tp/2005/12/i1845/13rRUxASuiM", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyjLoRw", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNwkzupV", "doi": "10.1109/ISMAR.2014.6948421", "title": "Sticky projections — A new approach to interactive shader lamp tracking", "normalizedTitle": "Sticky projections — A new approach to interactive shader lamp tracking", "abstract": "Shader lamps can augment physical objects with projected virtual replications using a camera-projector system, provided that the physical and virtual object are well registered. Precise registration and tracking has been a cumbersome and intrusive process in the past. In this paper, we present a new method for tracking arbitrarily shaped physical objects interactively. In contrast to previous approaches our system is mobile and makes solely use of the projection of the virtual replication to track the physical object and “stick” the projection to it. Our method consists of two stages, a fast pose initialization based on structured light patterns and a non-intrusive frame-by-frame tracking based on features detected in the projection. In the initialization phase a dense point cloud of the physical object is reconstructed and precisely matched to the virtual model to perfectly overlay the projection. During the tracking phase, a radiometrically corrected virtual camera view based on the current pose prediction is rendered and compared to the captured image. Matched features are triangulated providing a sparse set of surface points that is robustly aligned to the virtual model. The alignment transformation serves as an input for the new pose prediction. 
Quantitative experiments show that our approach can robustly track complex objects at interactive rates.", "abstracts": [ { "abstractType": "Regular", "content": "Shader lamps can augment physical objects with projected virtual replications using a camera-projector system, provided that the physical and virtual object are well registered. Precise registration and tracking has been a cumbersome and intrusive process in the past. In this paper, we present a new method for tracking arbitrarily shaped physical objects interactively. In contrast to previous approaches our system is mobile and makes solely use of the projection of the virtual replication to track the physical object and “stick” the projection to it. Our method consists of two stages, a fast pose initialization based on structured light patterns and a non-intrusive frame-by-frame tracking based on features detected in the projection. In the initialization phase a dense point cloud of the physical object is reconstructed and precisely matched to the virtual model to perfectly overlay the projection. During the tracking phase, a radiometrically corrected virtual camera view based on the current pose prediction is rendered and compared to the captured image. Matched features are triangulated providing a sparse set of surface points that is robustly aligned to the virtual model. The alignment transformation serves as an input for the new pose prediction. Quantitative experiments show that our approach can robustly track complex objects at interactive rates.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Shader lamps can augment physical objects with projected virtual replications using a camera-projector system, provided that the physical and virtual object are well registered. Precise registration and tracking has been a cumbersome and intrusive process in the past. In this paper, we present a new method for tracking arbitrarily shaped physical objects interactively. 
In contrast to previous approaches our system is mobile and makes solely use of the projection of the virtual replication to track the physical object and “stick” the projection to it. Our method consists of two stages, a fast pose initialization based on structured light patterns and a non-intrusive frame-by-frame tracking based on features detected in the projection. In the initialization phase a dense point cloud of the physical object is reconstructed and precisely matched to the virtual model to perfectly overlay the projection. During the tracking phase, a radiometrically corrected virtual camera view based on the current pose prediction is rendered and compared to the captured image. Matched features are triangulated providing a sparse set of surface points that is robustly aligned to the virtual model. The alignment transformation serves as an input for the new pose prediction. Quantitative experiments show that our approach can robustly track complex objects at interactive rates.", "fno": "06948421", "keywords": [ "Three Dimensional Displays", "Cameras", "Tracking", "Iterative Closest Point Algorithm", "Feature Extraction", "Graphics Processing Units", "Mobile Communication" ], "authors": [ { "affiliation": "EXTEND3D GmbH", "fullName": "Christoph Resch", "givenName": "Christoph", "surname": "Resch", "__typename": "ArticleAuthorType" }, { "affiliation": "EXTEND3D GmbH", "fullName": "Peter Keitler", "givenName": "Peter", "surname": "Keitler", "__typename": "ArticleAuthorType" }, { "affiliation": "TU München", "fullName": "Gudrun Klinker", "givenName": "Gudrun", "surname": "Klinker", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-09-01T00:00:00", "pubType": "proceedings", "pages": "151-156", "year": "2014", "issn": null, "isbn": "978-1-4799-6184-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], 
"adjacentArticles": { "previous": { "fno": "06948420", "articleId": "12OmNAXPykk", "__typename": "AdjacentArticleType" }, "next": { "fno": "06948422", "articleId": "12OmNx7G661", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icpr/2014/5209/0/5209c263", "title": "Robust Real-Time Extreme Head Pose Estimation", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209c263/12OmNBBQZpN", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/06977431", "title": "Hybrid On-Line 3D Face and Facial Actions Tracking in RGBD Video Sequences", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/06977431/12OmNCwlafG", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2013/2869/0/06671777", "title": "Real-time RGB-D camera relocalization", "doi": null, "abstractUrl": "/proceedings-article/ismar/2013/06671777/12OmNqEAT3B", "parentPublication": { "id": "proceedings/ismar/2013/2869/0", "title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2016/3740/0/07836478", "title": "Low-Cost Depth Camera Pose Tracking for Mobile Platforms", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836478/12OmNvkYxbb", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/svr/2013/5001/0/06655785", "title": "A Robust Real-Time Face Tracking Using Head Pose Estimation for a Markerless AR System", "doi": null, "abstractUrl": "/proceedings-article/svr/2013/06655785/12OmNxvwoYX", "parentPublication": { "id": "proceedings/svr/2013/5001/0", "title": "2013 XV Symposium on Virtual and Augmented Reality (SVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/intetain/2015/0061/0/07325479", "title": "Digital facial augmentation for interactive entertainment", "doi": null, "abstractUrl": "/proceedings-article/intetain/2015/07325479/12OmNy5R3ES", "parentPublication": { "id": "proceedings/intetain/2015/0061/0", "title": "2015 7th International Conference on Intelligent Technologies for Interactive Entertainment (INTETAIN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2015/7660/0/7660a148", "title": "[POSTER] Rubix: Dynamic Spatial Augmented Reality by Extraction of Plane Regions with a RGB-D Camera", "doi": null, "abstractUrl": "/proceedings-article/ismar/2015/7660a148/12OmNyKJicb", "parentPublication": { "id": "proceedings/ismar/2015/7660/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2015/9711/0/5720a028", "title": "FaceCept3D: Real Time 3D Face Tracking and Analysis", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2015/5720a028/12OmNzzP5GM", "parentPublication": { "id": "proceedings/iccvw/2015/9711/0", "title": "2015 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/03/07138633", "title": "Sticky Projections-A Model-Based Approach to Interactive Shader Lamps Tracking", "doi": null, "abstractUrl": "/journal/tg/2016/03/07138633/13rRUxly8XI", 
"parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/11/08794641", "title": "Projection Distortion-based Object Tracking in Shader Lamp Scenarios", "doi": null, "abstractUrl": "/journal/tg/2019/11/08794641/1cPXBdjp9yo", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNClQ0o4", "title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops", "acronym": "cvprw", "groupId": "1001809", "volume": "0", "displayVolume": "0", "year": "2010", "__typename": "ProceedingType" }, "article": { "id": "12OmNybfr0f", "doi": "10.1109/CVPRW.2010.5544604", "title": "One-shot scanning method using an unealibrated projector and camera system", "normalizedTitle": "One-shot scanning method using an unealibrated projector and camera system", "abstract": "In this paper, we describe a new one-shot scanning technique using a camera and a projector. Generally, a 3D measurement system based on a camera and a projector requires pre-calibration, such as the measurement of the relative position of these devices. If we can eliminate the calibration process, it would greatly improve the convenience of the system. For example, a single capture by a hand-held camera of an object illuminated by a hand-held projector would then allow to reconstruct the object shape. To achieve this, we propose a self-calibration technique using a projected grid pattern, computing the relative pose of projector and camera. This is similar to the relative pose or motion problem for two cameras, but in our case correspondences are not explicitly given. The actual algorithm is based on a simple exhaustive search of a finite set of hypotheses, with a cost function based on the epipolar constraint. In the experiments, successful reconstructions with our proposed method using synthetic and read data are presented.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we describe a new one-shot scanning technique using a camera and a projector. Generally, a 3D measurement system based on a camera and a projector requires pre-calibration, such as the measurement of the relative position of these devices. If we can eliminate the calibration process, it would greatly improve the convenience of the system. 
For example, a single capture by a hand-held camera of an object illuminated by a hand-held projector would then allow to reconstruct the object shape. To achieve this, we propose a self-calibration technique using a projected grid pattern, computing the relative pose of projector and camera. This is similar to the relative pose or motion problem for two cameras, but in our case correspondences are not explicitly given. The actual algorithm is based on a simple exhaustive search of a finite set of hypotheses, with a cost function based on the epipolar constraint. In the experiments, successful reconstructions with our proposed method using synthetic and real data are presented.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we describe a new one-shot scanning technique using a camera and a projector. Generally, a 3D measurement system based on a camera and a projector requires pre-calibration, such as the measurement of the relative position of these devices. If we can eliminate the calibration process, it would greatly improve the convenience of the system. For example, a single capture by a hand-held camera of an object illuminated by a hand-held projector would then allow to reconstruct the object shape. To achieve this, we propose a self-calibration technique using a projected grid pattern, computing the relative pose of projector and camera. This is similar to the relative pose or motion problem for two cameras, but in our case correspondences are not explicitly given. The actual algorithm is based on a simple exhaustive search of a finite set of hypotheses, with a cost function based on the epipolar constraint. 
In the experiments, successful reconstructions with our proposed method using synthetic and real data are presented.", "fno": "05544604", "keywords": [ "Calibration", "Cameras", "Data Acquisition", "Measurement Systems", "Motion Estimation", "Pose Estimation", "One Shot Scanning Method", "Uncalibrated Projector", "Camera System", "3 D Measurement System", "Self Calibration Technique", "Projected Grid Pattern", "Relative Pose", "Cost Function", "Epipolar Constraint", "Motion Problem", "Cameras" ], "authors": [ { "affiliation": "Kagoshima University, Japan", "fullName": "Hiroshi Kawasaki", "givenName": "Hiroshi", "surname": "Kawasaki", "__typename": "ArticleAuthorType" }, { "affiliation": "Advanced Industrial Science and Technology, Japan", "fullName": "Ryusuke Sagawa", "givenName": "Ryusuke", "surname": "Sagawa", "__typename": "ArticleAuthorType" }, { "affiliation": "Osaka University, Japan", "fullName": "Yasushi Yagi", "givenName": "Yasushi", "surname": "Yagi", "__typename": "ArticleAuthorType" }, { "affiliation": "Hiroshima City University, Japan", "fullName": "Ryo Furukawa", "givenName": "Ryo", "surname": "Furukawa", "__typename": "ArticleAuthorType" }, { "affiliation": "Hiroshima City University, Japan", "fullName": "Naoki Asada", "givenName": "Naoki", "surname": "Asada", "__typename": "ArticleAuthorType" }, { "affiliation": "INRIA Rhone-Alpes, France", "fullName": "Peter Sturm", "givenName": "Peter", "surname": "Sturm", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvprw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2010-06-01T00:00:00", "pubType": "proceedings", "pages": "104-111", "year": "2010", "issn": "2160-7508", "isbn": "978-1-4244-7029-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05544603", "articleId": "12OmNy2rRXA", "__typename": "AdjacentArticleType" }, "next": { "fno": "05544605", "articleId": 
"12OmNxX3uGB", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvprw/2009/3994/0/05204318", "title": "Multi-view reconstruction for projector camera systems based on bundle adjustment", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2009/05204318/12OmNAS9zxR", "parentPublication": { "id": "proceedings/cvprw/2009/3994/0", "title": "2009 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761601", "title": "Calibration of projector-camera systems from virtual mutual projection", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761601/12OmNBp52Hx", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2007/1179/0/04270475", "title": "Projector Calibration using Arbitrary Planes and Calibrated Camera", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2007/04270475/12OmNxYtu7r", "parentPublication": { "id": "proceedings/cvpr/2007/1179/0", "title": "2007 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2014/4308/0/4308a449", "title": "Projection Center Calibration for a Co-located Projector Camera System", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2014/4308a449/12OmNypIYA4", "parentPublication": { "id": "proceedings/cvprw/2014/4308/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2004/2158/2/01315159", "title": "A flexible projector-camera system for 
multi-planar displays", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2004/01315159/12OmNzBwGyN", "parentPublication": { "id": "proceedings/cvpr/2004/2158/2", "title": "Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109a320", "title": "Active Calibration of Camera-Projector Systems Based on Planar Homography", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109a320/12OmNzDehgc", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460078", "title": "Calibration-free projector-camera system for spatial augmented reality on planar surfaces", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460078/12OmNzUxO4G", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2004/2158/1/01315067", "title": "Making one object look like another: controlling appearance using a projector-camera system", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2004/01315067/12OmNzcPAjA", "parentPublication": { "id": "proceedings/cvpr/2004/2158/1", "title": "Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. 
CVPR 2004.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699178", "title": "A Single-Shot-Per-Pose Camera-Projector Calibration System for Imperfect Planar Targets", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699178/19F1O0IjR8k", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a261", "title": "A Projector Calibration Method Using a Mobile Camera for Projection Mapping System", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a261/1gysikN6QOQ", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNClQ0o4", "title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops", "acronym": "cvprw", "groupId": "1001809", "volume": "0", "displayVolume": "0", "year": "2010", "__typename": "ProceedingType" }, "article": { "id": "12OmNzUxO57", "doi": "10.1109/CVPRW.2010.5543474", "title": "Camera-projector matching using an unstructured video stream", "normalizedTitle": "Camera-projector matching using an unstructured video stream", "abstract": "This paper presents a novel approach for matching 2D points between a video projector and a digital camera. Our method is motivated by camera-projector applications for which the projected image needs to be warped to prevent geometric distortion. Since the warping process often needs geometric information on the 3D scene that can only be obtained from triangulation, we propose a technique for matching points in the projector to points in the camera based on arbitrary video sequences. The novelty of our method lies in the fact that it does not require the use of pre-designed structured light patterns as is usually the case. The back bone of our application lies in a function that matches activity patterns instead of colors. This makes our method robust to pose, to severe photometric and geometric distortions. It also does not require calibration of the color response curve of the camera-projector system. We present quantitative and qualitative results with synthetic and real life examples, and compare the proposed method with the scale invariant feature transform (SIFT) method and with a state-of-the-art structured light technique. 
We show that our method performs almost as well as structured light methods and significantly outperforms SIFT when the contrast of the video captured by the camera has been degraded.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents a novel approach for matching 2D points between a video projector and a digital camera. Our method is motivated by camera-projector applications for which the projected image needs to be warped to prevent geometric distortion. Since the warping process often needs geometric information on the 3D scene that can only be obtained from triangulation, we propose a technique for matching points in the projector to points in the camera based on arbitrary video sequences. The novelty of our method lies in the fact that it does not require the use of pre-designed structured light patterns as is usually the case. The back bone of our application lies in a function that matches activity patterns instead of colors. This makes our method robust to pose, to severe photometric and geometric distortions. It also does not require calibration of the color response curve of the camera-projector system. We present quantitative and qualitative results with synthetic and real life examples, and compare the proposed method with the scale invariant feature transform (SIFT) method and with a state-of-the-art structured light technique. We show that our method performs almost as well as structured light methods and significantly outperforms SIFT when the contrast of the video captured by the camera has been degraded.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents a novel approach for matching 2D points between a video projector and a digital camera. Our method is motivated by camera-projector applications for which the projected image needs to be warped to prevent geometric distortion. 
Since the warping process often needs geometric information on the 3D scene that can only be obtained from triangulation, we propose a technique for matching points in the projector to points in the camera based on arbitrary video sequences. The novelty of our method lies in the fact that it does not require the use of pre-designed structured light patterns as is usually the case. The back bone of our application lies in a function that matches activity patterns instead of colors. This makes our method robust to pose, to severe photometric and geometric distortions. It also does not require calibration of the color response curve of the camera-projector system. We present quantitative and qualitative results with synthetic and real life examples, and compare the proposed method with the scale invariant feature transform (SIFT) method and with a state-of-the-art structured light technique. We show that our method performs almost as well as structured light methods and significantly outperforms SIFT when the contrast of the video captured by the camera has been degraded.", "fno": "05543474", "keywords": [ "Computational Geometry", "Image Matching", "Image Reconstruction", "Image Sequences", "Motion Estimation", "Transforms", "Camera Projector Matching", "Unstructured Video Stream", "Digital Camera", "Video Projector", "Geometric Distortion", "Triangulation", "Arbitrary Video Sequences", "Scale Invariant Feature Transform", "Streaming Media", "Digital Cameras", "Layout", "Video Sequences", "Bones", "Pattern Matching", "Robustness", "Photometry", "Calibration", "Degradation" ], "authors": [ { "affiliation": "Institute of Information Technology, National Research Council Canada, Ottawa, Canada", "fullName": "Marc-Antoine Drouin", "givenName": "Marc-Antoine", "surname": "Drouin", "__typename": "ArticleAuthorType" }, { "affiliation": "MOIVRE, Université de Sherbrooke, Canada", "fullName": "Pierre-Marc Jodoin", "givenName": "Pierre-Marc", "surname": "Jodoin", "__typename": 
"ArticleAuthorType" }, { "affiliation": "MOIVRE, Université de Sherbrooke, Canada", "fullName": "Julien Prémont", "givenName": "Julien", "surname": "Prémont", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvprw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2010-06-01T00:00:00", "pubType": "proceedings", "pages": "33-40", "year": "2010", "issn": "2160-7508", "isbn": "978-1-4244-7029-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05543472", "articleId": "12OmNyuy9Ur", "__typename": "AdjacentArticleType" }, "next": { "fno": "05543464", "articleId": "12OmNxE2mGB", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvprw/2011/0529/0/05981781", "title": "Simultaneous self-calibration of a projector and a camera using structured light", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2011/05981781/12OmNBzRNuv", "parentPublication": { "id": "proceedings/cvprw/2011/0529/0", "title": "CVPR 2011 WORKSHOPS", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dim/2005/2327/0/23270302", "title": "Uncalibrated Multiple Image Stereo System with Arbitrarily Movable Camera and Projector for Wide Range Scanning", "doi": null, "abstractUrl": "/proceedings-article/3dim/2005/23270302/12OmNCcKQrZ", "parentPublication": { "id": "proceedings/3dim/2005/2327/0", "title": "3D Digital Imaging and Modeling, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2009/3994/0/05204317", "title": "Geometric video projector auto-calibration", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2009/05204317/12OmNCxtyKC", "parentPublication": { "id": "proceedings/cvprw/2009/3994/0", "title": "2009 IEEE Computer Society Conference on Computer 
Vision and Pattern Recognition Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2009/3994/0/05204319", "title": "A user-friendly method to geometrically calibrate projector-camera systems", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2009/05204319/12OmNwE9OQL", "parentPublication": { "id": "proceedings/cvprw/2009/3994/0", "title": "2009 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dimpvt/2012/4873/0/4873a464", "title": "Simple, Accurate, and Robust Projector-Camera Calibration", "doi": null, "abstractUrl": "/proceedings-article/3dimpvt/2012/4873a464/12OmNx0RIZY", "parentPublication": { "id": "proceedings/3dimpvt/2012/4873/0", "title": "2012 Second International Conference on 3D Imaging, Modeling, Processing, Visualization & Transmission", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109a320", "title": "Active Calibration of Camera-Projector Systems Based on Planar Homography", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109a320/12OmNzDehgc", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2007/06/v1368", "title": "Registration Techniques for Using Imperfect and Partially Calibrated Devices in Planar Multi-Projector Displays", "doi": null, "abstractUrl": "/journal/tg/2007/06/v1368/13rRUwInvyp", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a261", "title": "A Projector Calibration Method Using a 
Mobile Camera for Projection Mapping System", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a261/1gysikN6QOQ", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2005/2372/2/01467579", "title": "A projector-camera system with real-time photometric adaptation for dynamic environments", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2005/01467579/1htC67moXAs", "parentPublication": { "id": "proceedings/cvpr/2005/2372/2", "title": "2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09382896", "title": "DeProCams: Simultaneous Relighting, Compensation and Shape Reconstruction for Projector-Camera Systems", "doi": null, "abstractUrl": "/journal/tg/2021/05/09382896/1saZvVKgpFK", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "19F1LC52tjO", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "1810084", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "19F1O0IjR8k", "doi": "10.1109/ISMAR-Adjunct.2018.00023", "title": "A Single-Shot-Per-Pose Camera-Projector Calibration System for Imperfect Planar Targets", "normalizedTitle": "A Single-Shot-Per-Pose Camera-Projector Calibration System for Imperfect Planar Targets", "abstract": "Existing camera-projector calibration methods typically warp feature points from a camera image to a projector image using estimated homographies, and often suffer from errors in camera parameters and noise due to imperfect planarity of the calibration target. In this paper we propose a simple yet robust solution that explicitly deals with these challenges. Following the structured light (SL) camera-project calibration framework, a carefully designed correspondence algorithm is built on top of the De Bruijn patterns. Such correspondence is then used for initial camera-projector calibration. Then, to gain more robustness against noises, especially those from an imperfect planar calibration board, a bundle adjustment algorithm is developed to jointly optimize the estimated camera and projector models. Aside from the robustness, our solution requires only one shot of SL pattern for each calibration board pose, which is much more convenient than multi-shot solutions in practice. 
Data validations are conducted on both synthetic and real datasets, and our method shows clear advantages over existing methods in all experiments.", "abstracts": [ { "abstractType": "Regular", "content": "Existing camera-projector calibration methods typically warp feature points from a camera image to a projector image using estimated homographies, and often suffer from errors in camera parameters and noise due to imperfect planarity of the calibration target. In this paper we propose a simple yet robust solution that explicitly deals with these challenges. Following the structured light (SL) camera-project calibration framework, a carefully designed correspondence algorithm is built on top of the De Bruijn patterns. Such correspondence is then used for initial camera-projector calibration. Then, to gain more robustness against noises, especially those from an imperfect planar calibration board, a bundle adjustment algorithm is developed to jointly optimize the estimated camera and projector models. Aside from the robustness, our solution requires only one shot of SL pattern for each calibration board pose, which is much more convenient than multi-shot solutions in practice. Data validations are conducted on both synthetic and real datasets, and our method shows clear advantages over existing methods in all experiments.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Existing camera-projector calibration methods typically warp feature points from a camera image to a projector image using estimated homographies, and often suffer from errors in camera parameters and noise due to imperfect planarity of the calibration target. In this paper we propose a simple yet robust solution that explicitly deals with these challenges. Following the structured light (SL) camera-project calibration framework, a carefully designed correspondence algorithm is built on top of the De Bruijn patterns. 
Such correspondence is then used for initial camera-projector calibration. Then, to gain more robustness against noises, especially those from an imperfect planar calibration board, a bundle adjustment algorithm is developed to jointly optimize the estimated camera and projector models. Aside from the robustness, our solution requires only one shot of SL pattern for each calibration board pose, which is much more convenient than multi-shot solutions in practice. Data validations are conducted on both synthetic and real datasets, and our method shows clear advantages over existing methods in all experiments.", "fno": "08699178", "keywords": [ "Calibration", "Cameras", "Optical Projectors", "Parameter Estimation", "Pose Estimation", "Multishot Solutions", "Single Shot Per Pose Camera Projector Calibration System", "Structured Light Camera Project Calibration Framework", "Imperfect Planar Calibration Board", "Homography Estimation", "Camera Parameter Estimation", "Camera Projector Calibration Methods", "Projector Imaging Models", "SL Camera Project Calibration Framework", "De Bruijn Patterns", "Bundle Adjustment Algorithm", "Calibration", "Cameras", "Image Color Analysis", "Bundle Adjustment", "Robustness", "Encoding", "Computing Methodologies X 2014 Camera Calibration", "Computing Methodologies X 2014 3 D Imaging", "Computing Methodologies X 2014 Reconstruction" ], "authors": [ { "affiliation": "Temple University", "fullName": "Bingyao Huang", "givenName": "Bingyao", "surname": "Huang", "__typename": "ArticleAuthorType" }, { "affiliation": "Rowan University", "fullName": "Samed Ozdemir", "givenName": "Samed", "surname": "Ozdemir", "__typename": "ArticleAuthorType" }, { "affiliation": "Rowan University", "fullName": "Ying Tang", "givenName": "Ying", "surname": "Tang", "__typename": "ArticleAuthorType" }, { "affiliation": "HiScene Info. 
Technologies", "fullName": "Chunyuan Liao", "givenName": "Chunyuan", "surname": "Liao", "__typename": "ArticleAuthorType" }, { "affiliation": "Temple Univeristy", "fullName": "Haibin Ling", "givenName": "Haibin", "surname": "Ling", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-10-01T00:00:00", "pubType": "proceedings", "pages": "15-20", "year": "2018", "issn": null, "isbn": "978-1-5386-7592-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08699313", "articleId": "19F1QYpUBj2", "__typename": "AdjacentArticleType" }, "next": { "fno": "08699304", "articleId": "19F1T4QjgOY", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2001/1272/2/127220504", "title": "A Self-Correcting Projector", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2001/127220504/12OmNB8Cj43", "parentPublication": { "id": "proceedings/cvpr/2001/1272/2", "title": "Proceedings of the 2001 IEEE Computer Society Conference on Computer Vision and Pattern Recognition. 
CVPR 2001", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761601", "title": "Calibration of projector-camera systems from virtual mutual projection", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761601/12OmNBp52Hx", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457d596", "title": "Simultaneous Geometric and Radiometric Calibration of a Projector-Camera Pair", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457d596/12OmNwpGgNQ", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2014/4308/0/4308a449", "title": "Projection Center Calibration for a Co-located Projector Camera System", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2014/4308a449/12OmNypIYA4", "parentPublication": { "id": "proceedings/cvprw/2014/4308/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2004/2158/2/01315159", "title": "A flexible projector-camera system for multi-planar displays", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2004/01315159/12OmNzBwGyN", "parentPublication": { "id": "proceedings/cvpr/2004/2158/2", "title": "Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. 
CVPR 2004.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109a320", "title": "Active Calibration of Camera-Projector Systems Based on Planar Homography", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109a320/12OmNzDehgc", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2007/06/v1368", "title": "Registration Techniques for Using Imperfect and Par tially Calibrated Devices in Planar Multi-Projector Displays", "doi": null, "abstractUrl": "/journal/tg/2007/06/v1368/13rRUwInvyp", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a261", "title": "A Projector Calibration Method Using a Mobile Camera for Projection Mapping System", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a261/1gysikN6QOQ", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/11/09523844", "title": "Directionally Decomposing Structured Light for Projector Calibration", "doi": null, "abstractUrl": "/journal/tg/2021/11/09523844/1wpqmnzDSzm", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzIUg0M", "title": "2013 Seventh International Conference on Image and Graphics (ICIG)", "acronym": "icig", "groupId": "1001790", "volume": "0", "displayVolume": "0", "year": "2013", "__typename": "ProceedingType" }, "article": { "id": "12OmNASraPv", "doi": "10.1109/ICIG.2013.156", "title": "Holographic Projection Using Converging Spherical Wave Illumination", "normalizedTitle": "Holographic Projection Using Converging Spherical Wave Illumination", "abstract": "A holographic projection system using converging spherical wave illumination has been presented. The system takes into account the combination of Fresnel holographic projection and Fourier holographic projection. The effect of pixelated spatial light modulator is analyzed. By adding the quadratic phase of diffractive lens to the phase of the generated hologram, the separation of image plane from Fourier plane is achieved. Meanwhile, the zero-order beam and high diffraction orders can be filtered out by higher pass filter and aperture placed in the Fourier plane. A holographic projection system based on liquid crystal on silicon is set up. Experimental results show that not only Fresnel holographic projection but also Fourier holographic projection can be achieved without zero order beam and higher diffraction orders in this universal system.", "abstracts": [ { "abstractType": "Regular", "content": "A holographic projection system using converging spherical wave illumination has been presented. The system takes into account the combination of Fresnel holographic projection and Fourier holographic projection. The effect of pixelated spatial light modulator is analyzed. By adding the quadratic phase of diffractive lens to the phase of the generated hologram, the separation of image plane from Fourier plane is achieved. Meanwhile, the zero-order beam and high diffraction orders can be filtered out by higher pass filter and aperture placed in the Fourier plane. 
A holographic projection system based on liquid crystal on silicon is set up. Experimental results show that not only Fresnel holographic projection but also Fourier holographic projection can be achieved without zero order beam and higher diffraction orders in this universal system.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A holographic projection system using converging spherical wave illumination has been presented. The system takes into account the combination of Fresnel holographic projection and Fourier holographic projection. The effect of pixelated spatial light modulator is analyzed. By adding the quadratic phase of diffractive lens to the phase of the generated hologram, the separation of image plane from Fourier plane is achieved. Meanwhile, the zero-order beam and high diffraction orders can be filtered out by higher pass filter and aperture placed in the Fourier plane. A holographic projection system based on liquid crystal on silicon is set up. Experimental results show that not only Fresnel holographic projection but also Fourier holographic projection can be achieved without zero order beam and higher diffraction orders in this universal system.", "fno": "5050a761", "keywords": [ "Diffraction", "Optical Diffraction", "Lenses", "Laser Beams", "Holography", "Holographic Optical Components", "Optical Imaging", "Liquid Crystal On Silicon", "Holographic Projection", "Spherical Wave Illumination", "Fourier Plane" ], "authors": [ { "affiliation": null, "fullName": "Shen Chuan", "givenName": "Shen", "surname": "Chuan", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Zhang Cheng", "givenName": "Zhang", "surname": "Cheng", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Cheng Hong", "givenName": "Cheng", "surname": "Hong", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Zhang Fen", "givenName": "Zhang", "surname": "Fen", "__typename": "ArticleAuthorType" }, { 
"affiliation": null, "fullName": "Wei Sui", "givenName": "Wei", "surname": "Sui", "__typename": "ArticleAuthorType" } ], "idPrefix": "icig", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2013-07-01T00:00:00", "pubType": "proceedings", "pages": "761-765", "year": "2013", "issn": null, "isbn": "978-0-7695-5050-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "5050a757", "articleId": "12OmNzZWbzd", "__typename": "AdjacentArticleType" }, "next": { "fno": "5050a766", "articleId": "12OmNyoSbcZ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/eqec/2005/8973/0/01567314", "title": "Fresnel diffraction from polygonal apertures", "doi": null, "abstractUrl": "/proceedings-article/eqec/2005/01567314/12OmNAolGVA", "parentPublication": { "id": "proceedings/eqec/2005/8973/0", "title": "2005 European Quantum Electronics Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mppoi/1994/5832/0/00336622", "title": "Holographic optical interconnections", "doi": null, "abstractUrl": "/proceedings-article/mppoi/1994/00336622/12OmNBqv286", "parentPublication": { "id": "proceedings/mppoi/1994/5832/0", "title": "First International Workshop on Massively Parallel Processing Using Optical Interconnections", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2010/4077/2/4077c482", "title": "Imaging Research of Fresnel Holography", "doi": null, "abstractUrl": "/proceedings-article/icicta/2010/4077c482/12OmNrJiCXa", "parentPublication": { "id": "proceedings/icicta/2010/4077/2", "title": "Intelligent Computation Technology and Automation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/icmtma/2009/3583/2/3583b058", "title": "Digital Holography Used for Shape Measurement of Microscopic Object", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2009/3583b058/12OmNvDI3U4", "parentPublication": { "id": "proceedings/icmtma/2009/3583/2", "title": "2009 International Conference on Measuring Technology and Mechatronics Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2011/4353/2/05750979", "title": "Research on Resolution of the Volume Holography Imaging Systems", "doi": null, "abstractUrl": "/proceedings-article/icicta/2011/05750979/12OmNwFicVh", "parentPublication": { "id": "icicta/2011/4353/2", "title": "Intelligent Computation Technology and Automation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fbie/2008/3561/0/3561a056", "title": "Information Processing in Digital Holographic Microscopy", "doi": null, "abstractUrl": "/proceedings-article/fbie/2008/3561a056/12OmNxjjEjY", "parentPublication": { "id": "proceedings/fbie/2008/3561/0", "title": "2008 International Seminar on Future Biomedical Information Engineering (FBIE 2008)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nvmt/1996/3510/0/00534666", "title": "Holographic 3D disks", "doi": null, "abstractUrl": "/proceedings-article/nvmt/1996/00534666/12OmNxzuMLd", "parentPublication": { "id": "proceedings/nvmt/1996/3510/0", "title": "Proceedings of Nonvolatile Memory Technology Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csit/2013/2460/0/06710366", "title": "High speed water monitoring systems based on Digital Holographic Microscopy", "doi": null, "abstractUrl": "/proceedings-article/csit/2013/06710366/12OmNyQGS1m", "parentPublication": { "id": "proceedings/csit/2013/2460/0", "title": "2013 Computer Science 
and Information Technologies (CSIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tc/1975/04/01672829", "title": "Ultrasonic Holographic Fourier Spectroscopy via Optical Fourier Transforms", "doi": null, "abstractUrl": "/journal/tc/1975/04/01672829/13rRUIIVlbc", "parentPublication": { "id": "trans/tc", "title": "IEEE Transactions on Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNrF2DIf", "title": "Seventh Biennial IEEE International Nonvolatile Memory Technology Conference. Proceedings", "acronym": "nvmt", "groupId": "1000505", "volume": "0", "displayVolume": "0", "year": "1998", "__typename": "ProceedingType" }, "article": { "id": "12OmNvjQ91k", "doi": "10.1109/NVMT.1998.723221", "title": "Nonvolatile optical storage in photorefractive crystals", "normalizedTitle": "Nonvolatile optical storage in photorefractive crystals", "abstract": "We discuss the state-of-the-art for optical holographic storage of information in photorefractive crystals. In particular, we have recently been successful in storing 3D images and 10 /spl mu/m optical interconnects. Holography is a common technique used to generate realistic 3D images. Photorefractive crystals are an ideal storage medium for recording holographic images because of several advantages, such as real-time exposure and display, a simple recording process in which no pre- or post-processing is required, low writing beam powers, and a potentially large storage volume. Recent experiments have clearly shown the potential of photorefractive crystals for 3D image storage and retrieval. In this paper, we report the first demonstration, to our knowledge, of the corresponding storage and retrieval of 3D color holograms in a photorefractive crystal. The 3D image reproduces the colors of the object and is visible over a wide perspective as demonstrated by moving one's head back and forth while viewing the hologram. The wide field-of-view of the hologram is also demonstrated using an imaging lens with a color CCD camera mounted on a goniometer to record various perspectives. 
In addition to storage of 3D images, we have also been successful at nonvolatile storage of two dimensional waveguides or optical interconnects in bulk crystals or potential substrates for electronic applications.", "abstracts": [ { "abstractType": "Regular", "content": "We discuss the state-of-the-art for optical holographic storage of information in photorefractive crystals. In particular, we have recently been successful in storing 3D images and 10 /spl mu/m optical interconnects. Holography is a common technique used to generate realistic 3D images. Photorefractive crystals are an ideal storage medium for recording holographic images because of several advantages, such as real-time exposure and display, a simple recording process in which no pre- or post-processing is required, low writing beam powers, and a potentially large storage volume. Recent experiments have clearly shown the potential of photorefractive crystals for 3D image storage and retrieval. In this paper, we report the first demonstration, to our knowledge, of the corresponding storage and retrieval of 3D color holograms in a photorefractive crystal. The 3D image reproduces the colors of the object and is visible over a wide perspective as demonstrated by moving one's head back and forth while viewing the hologram. The wide field-of-view of the hologram is also demonstrated using an imaging lens with a color CCD camera mounted on a goniometer to record various perspectives. In addition to storage of 3D images, we have also been successful at nonvolatile storage of two dimensional waveguides or optical interconnects in bulk crystals or potential substrates for electronic applications.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We discuss the state-of-the-art for optical holographic storage of information in photorefractive crystals. In particular, we have recently been successful in storing 3D images and 10 /spl mu/m optical interconnects. 
Holography is a common technique used to generate realistic 3D images. Photorefractive crystals are an ideal storage medium for recording holographic images because of several advantages, such as real-time exposure and display, a simple recording process in which no pre- or post-processing is required, low writing beam powers, and a potentially large storage volume. Recent experiments have clearly shown the potential of photorefractive crystals for 3D image storage and retrieval. In this paper, we report the first demonstration, to our knowledge, of the corresponding storage and retrieval of 3D color holograms in a photorefractive crystal. The 3D image reproduces the colors of the object and is visible over a wide perspective as demonstrated by moving one's head back and forth while viewing the hologram. The wide field-of-view of the hologram is also demonstrated using an imaging lens with a color CCD camera mounted on a goniometer to record various perspectives. In addition to storage of 3D images, we have also been successful at nonvolatile storage of two dimensional waveguides or optical interconnects in bulk crystals or potential substrates for electronic applications.", "fno": "00723221", "keywords": [ "Holographic Storage", "Optical Storage", "Optical Interconnections", "Holography", "Optical Waveguides", "Colour", "Photorefractive Materials", "Nonvolatile Optical Storage", "Photorefractive Crystals", "Optical Holographic Storage", "3 D Image Storage", "Optical Interconnects", "Holography", "3 D Image Generation", "Holographic Image Recording", "Storage Medium", "Real Time Exposure", "Real Time Display", "Recording Process", "Writing Beam Power", "Storage Volume", "3 D Image Retrieval", "3 D Color Holograms", "Object Color Reproduction", "Hologram Field Of View", "Imaging Lens", "Color CCD Camera", "Goniometer", "Nonvolatile Storage", "2 D Waveguide Storage", "2 D Optical Interconnect Storage", "Bulk Crystals", "Electronic Applications", "Photorefractive 
Materials", "Image Storage", "Crystals", "Holography", "Optical Interconnections", "Holographic Optical Components", "Optical Recording", "Optical Waveguides", "Image Generation", "Displays" ], "authors": [ { "affiliation": "Dept. of Phys., Arkansas Univ., Fayetteville, AR, USA", "fullName": "G. Salamo", "givenName": "G.", "surname": "Salamo", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "M. Klotz", "givenName": "M.", "surname": "Klotz", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "H. Meng", "givenName": "H.", "surname": "Meng", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "R.J. Anderson", "givenName": "R.J.", "surname": "Anderson", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "C.A. Heid", "givenName": "C.A.", "surname": "Heid", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "B.P. Ketchel", "givenName": "B.P.", "surname": "Ketchel", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "G.L. 
Wood", "givenName": "G.L.", "surname": "Wood", "__typename": "ArticleAuthorType" } ], "idPrefix": "nvmt", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "1998-01-01T00:00:00", "pubType": "proceedings", "pages": "66,67", "year": "1998", "issn": null, "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "00723220", "articleId": "12OmNvUaNfQ", "__typename": "AdjacentArticleType" }, "next": { "fno": "00723222", "articleId": "12OmNCfAPBW", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icis/2010/4147/0/4147a836", "title": "Three-Dimensional TV Using Holographic Stereogram", "doi": null, "abstractUrl": "/proceedings-article/icis/2010/4147a836/12OmNAY79mF", "parentPublication": { "id": "proceedings/icis/2010/4147/0", "title": "Computer and Information Science, ACIS International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/1989/1911/1/00047191", "title": "Approaches to optical microprogramming", "doi": null, "abstractUrl": "/proceedings-article/hicss/1989/00047191/12OmNBajTM8", "parentPublication": { "id": "proceedings/hicss/1989/1911/1", "title": "Proceedings of the Twenty-Second Annual Hawaii International Conference on System Sciences. 
Volume 1: Architecture Track", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdl/2002/7350/0/01022778", "title": "Electrical and optical properties of discotic liquid crystals with various core structures", "doi": null, "abstractUrl": "/proceedings-article/icdl/2002/01022778/12OmNCmpcT5", "parentPublication": { "id": "proceedings/icdl/2002/7350/0", "title": "Proceedings of 14th International Conference on Dielectric Liquids", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dcc/1992/2717/0/00227478", "title": "Optical techniques for image compression", "doi": null, "abstractUrl": "/proceedings-article/dcc/1992/00227478/12OmNCxL9Rj", "parentPublication": { "id": "proceedings/dcc/1992/2717/0", "title": "1992 Data Compression Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2003/2030/0/20300078", "title": "Holographic Video Display of Time-Series Volumetric Medical Data", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2003/20300078/12OmNx76TAX", "parentPublication": { "id": "proceedings/ieee-vis/2003/2030/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/1989/1911/1/00047188", "title": "A coherent system for performing an optical transform", "doi": null, "abstractUrl": "/proceedings-article/hicss/1989/00047188/12OmNx8wTlX", "parentPublication": { "id": "proceedings/hicss/1989/1911/1", "title": "Proceedings of the Twenty-Second Annual Hawaii International Conference on System Sciences. 
Volume 1: Architecture Track", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nvmt/1998/4518/0/00723226", "title": "Bacteriorhodopsin-based volumetric optical memory", "doi": null, "abstractUrl": "/proceedings-article/nvmt/1998/00723226/12OmNxaw5aM", "parentPublication": { "id": "proceedings/nvmt/1998/4518/0", "title": "Seventh Biennial IEEE International Nonvolatile Memory Technology Conference. Proceedings", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csit/2013/2460/0/06710366", "title": "High speed water monitoring systems based on Digital Holographic Microscopy", "doi": null, "abstractUrl": "/proceedings-article/csit/2013/06710366/12OmNyQGS1m", "parentPublication": { "id": "proceedings/csit/2013/2460/0", "title": "2013 Computer Science and Information Technologies (CSIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdl/2002/7350/0/01022777", "title": "Optical property of photonic crystals infiltrated with various liquids and liquid crystals", "doi": null, "abstractUrl": "/proceedings-article/icdl/2002/01022777/12OmNzlUKrK", "parentPublication": { "id": "proceedings/icdl/2002/7350/0", "title": "Proceedings of 14th International Conference on Dielectric Liquids", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tc/1975/04/01672829", "title": "Ultrasonic Holographic Fourier Spectroscopy via Optical Fourier Transforms", "doi": null, "abstractUrl": "/journal/tc/1975/04/01672829/13rRUIIVlbc", "parentPublication": { "id": "trans/tc", "title": "IEEE Transactions on Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzQhP7Z", "title": "2014 International Symposium on Optomechatronic Technologies (ISOT 2014)", "acronym": "isot", "groupId": "1002942", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNyS6RIa", "doi": "10.1109/ISOT.2014.62", "title": "Holographic Position Measurements of an Optically-Trapped Gold Nanoparticle Using Twilight-Field Microscope", "normalizedTitle": "Holographic Position Measurements of an Optically-Trapped Gold Nanoparticle Using Twilight-Field Microscope", "abstract": "We demonstrated three-dimensional (3D) position measurements of a gold nanoparticle held in optical tweezers in water. The position measurements were performed with an in-line, low-coherence digital holographic microscope with the twilight-field technique. The position of the optically-trapped nanoparticle with a diameter of 60 nm had an axial variation of 7.6 nm in the standard deviation when a 1070 nm laser beam with an intensity of more than 27 MW/cm2 was focused with a 1.25 NA objective lens.", "abstracts": [ { "abstractType": "Regular", "content": "We demonstrated three-dimensional (3D) position measurements of a gold nanoparticle held in optical tweezers in water. The position measurements were performed with an in-line, low-coherence digital holographic microscope with the twilight-field technique. The position of the optically-trapped nanoparticle with a diameter of 60 nm had an axial variation of 7.6 nm in the standard deviation when a 1070 nm laser beam with an intensity of more than 27 MW/cm2 was focused with a 1.25 NA objective lens.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We demonstrated three-dimensional (3D) position measurements of a gold nanoparticle held in optical tweezers in water. The position measurements were performed with an in-line, low-coherence digital holographic microscope with the twilight-field technique. 
The position of the optically-trapped nanoparticle with a diameter of 60 nm had an axial variation of 7.6 nm in the standard deviation when a 1070 nm laser beam with an intensity of more than 27 MW/cm2 was focused with a 1.25 NA objective lens.", "fno": "07119426", "keywords": [ "Holography", "Holographic Optical Components", "Gold", "High Speed Optical Techniques", "Optical Mixing", "Charge Carrier Processes", "Optical Sensors", "Gold Nanoparticle", "Digital Holography", "Three Dimensional Target Tracking", "Optical Tweezers", "Optical Manipulation" ], "authors": [ { "affiliation": null, "fullName": "Kazufumi Goto", "givenName": "Kazufumi", "surname": "Goto", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yoshio Hayasaki", "givenName": "Yoshio", "surname": "Hayasaki", "__typename": "ArticleAuthorType" } ], "idPrefix": "isot", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-11-01T00:00:00", "pubType": "proceedings", "pages": "232-233", "year": "2014", "issn": null, "isbn": "978-1-4673-6752-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07119425", "articleId": "12OmNzcPA9B", "__typename": "AdjacentArticleType" }, "next": { "fno": "07119427", "articleId": "12OmNzYwbVv", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icig/2013/5050/0/5050a761", "title": "Holographic Projection Using Converging Spherical Wave Illumination", "doi": null, "abstractUrl": "/proceedings-article/icig/2013/5050a761/12OmNASraPv", "parentPublication": { "id": "proceedings/icig/2013/5050/0", "title": "2013 Seventh International Conference on Image and Graphics (ICIG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mppoi/1994/5832/0/00336622", "title": "Holographic optical interconnections", "doi": 
null, "abstractUrl": "/proceedings-article/mppoi/1994/00336622/12OmNBqv286", "parentPublication": { "id": "proceedings/mppoi/1994/5832/0", "title": "First International Workshop on Massively Parallel Processing Using Optical Interconnections", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vlsid/2016/8700/0/8700a603", "title": "Reconfiguration Performance Recovery on Optically Reconfigurable Gate Arrays", "doi": null, "abstractUrl": "/proceedings-article/vlsid/2016/8700a603/12OmNvA1h4S", "parentPublication": { "id": "proceedings/vlsid/2016/8700/0", "title": "2016 29th International Conference on VLSI Design and 2016 15th International Conference on Embedded Systems (VLSID)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/paccs/2009/3614/0/3614a536", "title": "Synthesis a New Photochromic Diarylethene for Electrochemical Switching and Holographic Optical Recording", "doi": null, "abstractUrl": "/proceedings-article/paccs/2009/3614a536/12OmNwvVrBW", "parentPublication": { "id": "proceedings/paccs/2009/3614/0", "title": "2009 Pacific-Asia Conference on Circuits, Communications and Systems (PACCS 2009)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nvmt/1996/3510/0/00534666", "title": "Holographic 3D disks", "doi": null, "abstractUrl": "/proceedings-article/nvmt/1996/00534666/12OmNxzuMLd", "parentPublication": { "id": "proceedings/nvmt/1996/3510/0", "title": "Proceedings of Nonvolatile Memory Technology Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dft/2010/8447/0/05634942", "title": "Recovery Method for a Laser Array Failure on Dynamic Optically Reconfigurable Gate Arrays", "doi": null, "abstractUrl": "/proceedings-article/dft/2010/05634942/12OmNyz5JSM", "parentPublication": { "id": "proceedings/dft/2010/8447/0", "title": "2010 IEEE 25th 
International Symposium on Defect and Fault Tolerance in VLSI Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/eqec/2005/8973/0/01567510", "title": "Influencing a single fluorescent molecule with single metallic nanoparticle", "doi": null, "abstractUrl": "/proceedings-article/eqec/2005/01567510/12OmNzBOicU", "parentPublication": { "id": "proceedings/eqec/2005/8973/0", "title": "2005 European Quantum Electronics Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/eqec/2005/8973/0/01567241", "title": "Soliton mobility in optically-induced nonlinear lattices", "doi": null, "abstractUrl": "/proceedings-article/eqec/2005/01567241/12OmNzEVRVf", "parentPublication": { "id": "proceedings/eqec/2005/8973/0", "title": "2005 European Quantum Electronics Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/paccs/2009/3614/0/3614a461", "title": "Synthesis of a New Photochromic Diarylethene and its Application in Holographic Optical Storage", "doi": null, "abstractUrl": "/proceedings-article/paccs/2009/3614a461/12OmNzZmZnP", "parentPublication": { "id": "proceedings/paccs/2009/3614/0", "title": "2009 Pacific-Asia Conference on Circuits, Communications and Systems (PACCS 2009)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciev/2016/1269/0/07760159", "title": "An enhanced efficient thin film silicon solar cell design based on silver nanoparticle", "doi": null, "abstractUrl": "/proceedings-article/iciev/2016/07760159/12OmNzb7Zrl", "parentPublication": { "id": "proceedings/iciev/2016/1269/0", "title": "2016 International Conference on Informatics, Electronics and Vision (ICIEV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNvAiSpZ", "title": "2015 IEEE Virtual Reality (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNBh8gZM", "doi": "10.1109/VR.2015.7223394", "title": "A procedure for accurate calibration of a tabletop haploscope AR environment", "normalizedTitle": "A procedure for accurate calibration of a tabletop haploscope AR environment", "abstract": "In previous papers, a novel haploscope-based AR environment was implemented [1, 3]. In that system, a participant looks through a set of reflective lenses onto a real-world environment. However, at the same time, there are monitors to the side displaying a virtual object. This object is reflected onto the lenses and is thus, from the viewpoint of the participant, overlaid onto the real environment. In Hua [1], some initial work was done designing a calibration procedure for this haploscope-based AR environment. The current work seeks to modify and expand Hua's original calibration procedure to make it both more effective and more efficient. As part of developing this new calibration procedure, this paper examines potential sources of error and recommends processes and steps for reducing or eliminating these potential error sources.", "abstracts": [ { "abstractType": "Regular", "content": "In previous papers, a novel haploscope-based AR environment was implemented [1, 3]. In that system, a participant looks through a set of reflective lenses onto a real-world environment. However, at the same time, there are monitors to the side displaying a virtual object. This object is reflected onto the lenses and is thus, from the viewpoint of the participant, overlaid onto the real environment. In Hua [1], some initial work was done designing a calibration procedure for this haploscope-based AR environment. 
The current work seeks to modify and expand Hua's original calibration procedure to make it both more effective and more efficient. As part of developing this new calibration procedure, this paper examines potential sources of error and recommends processes and steps for reducing or eliminating these potential error sources.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In previous papers, a novel haploscope-based AR environment was implemented [1, 3]. In that system, a participant looks through a set of reflective lenses onto a real-world environment. However, at the same time, there are monitors to the side displaying a virtual object. This object is reflected onto the lenses and is thus, from the viewpoint of the participant, overlaid onto the real environment. In Hua [1], some initial work was done designing a calibration procedure for this haploscope-based AR environment. The current work seeks to modify and expand Hua's original calibration procedure to make it both more effective and more efficient. As part of developing this new calibration procedure, this paper examines potential sources of error and recommends processes and steps for reducing or eliminating these potential error sources.", "fno": "07223394", "keywords": [ "Calibration", "Adaptive Optics", "Optical Imaging", "Optical Diffraction", "Optical Distortion", "Augmented Reality", "Lenses", "Depth Perception", "Augmented Reality", "Calibration" ], "authors": [ { "affiliation": "Mississippi State University", "fullName": "Nate Phillips", "givenName": "Nate", "surname": "Phillips", "__typename": "ArticleAuthorType" }, { "affiliation": "Mississippi State University", "fullName": "J. Edward Swan", "givenName": "J. 
Edward", "surname": "Swan", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-03-01T00:00:00", "pubType": "proceedings", "pages": "259-260", "year": "2015", "issn": null, "isbn": "978-1-4799-1727-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07223393", "articleId": "12OmNwDSdto", "__typename": "AdjacentArticleType" }, "next": { "fno": "07223395", "articleId": "12OmNvq5jtO", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2015/1727/0/07223450", "title": "Evaluating optical see-through head-mounted display calibration via frustum visualization", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223450/12OmNrAv3Ap", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/1992/2855/0/00223221", "title": "Accurate calibration of CCD-cameras", "doi": null, "abstractUrl": "/proceedings-article/cvpr/1992/00223221/12OmNrGsDkm", "parentPublication": { "id": "proceedings/cvpr/1992/2855/0", "title": "Proceedings 1992 IEEE Computer Society Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/5209a184", "title": "Generalized Radial Alignment Constraint for Camera Calibration", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209a184/12OmNwnYG1M", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391a612", "title": 
"Self-Calibration of Optical Lenses", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391a612/12OmNyQ7FPm", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2016/3740/0/07836487", "title": "Reduction of Interaction Space in Single Point Active Alignment Method for Optical See-Through Head-Mounted Display Calibration", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836487/12OmNyRg4AG", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/robot/1988/0852/0/00012154", "title": "Calibration procedure for an industrial robot", "doi": null, "abstractUrl": "/proceedings-article/robot/1988/00012154/12OmNzVoBWp", "parentPublication": { "id": "proceedings/robot/1988/0852/0", "title": "Proceedings. 
1988 IEEE International Conference on Robotics and Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2006/08/i1335", "title": "A Generic Camera Model and Calibration Method for Conventional, Wide-Angle, and Fish-Eye Lenses", "doi": null, "abstractUrl": "/journal/tp/2006/08/i1335/13rRUyv53Gt", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699263", "title": "Design and Calibration of an Augmented Reality Haploscope", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699263/19F1OYkEmWs", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798335", "title": "Design, Assembly, Calibration, and Measurement of an Augmented Reality Haploscope", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798335/1cJ122q4Cty", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/lics/2021/4895/0/09470613", "title": "Higher Lenses", "doi": null, "abstractUrl": "/proceedings-article/lics/2021/09470613/1v2QoffQ5eo", "parentPublication": { "id": "proceedings/lics/2021/4895/0", "title": "2021 36th Annual ACM/IEEE Symposium on Logic in Computer Science (LICS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNrMHOd6", "title": "2016 49th Hawaii International Conference on System Sciences (HICSS)", "acronym": "hicss", "groupId": "1000730", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNwO5LS5", "doi": "10.1109/HICSS.2016.68", "title": "A Processual View on Social Presence Emergence in Virtual Worlds", "normalizedTitle": "A Processual View on Social Presence Emergence in Virtual Worlds", "abstract": "Distributed collaboration is increasingly conducted in virtual worlds. Successful distributed collaboration is can benefit from social presence, the feeling of being there with others in a virtual environment. Social presence includes the dimensions of copresence, psychological involvement and behavioral engagement. Despite the importance of social presence, we currently lack empirically grounded understanding of how social presence emerges through these dimensions. To begin remedying this shortcoming, we analyzed how the social presence dimensions were organized in social interaction. We found that the order of the dimensions depends on verbal and nonverbal communication. These findings clarify extant theory of social presence emergence in 3D virtual worlds.", "abstracts": [ { "abstractType": "Regular", "content": "Distributed collaboration is increasingly conducted in virtual worlds. Successful distributed collaboration is can benefit from social presence, the feeling of being there with others in a virtual environment. Social presence includes the dimensions of copresence, psychological involvement and behavioral engagement. Despite the importance of social presence, we currently lack empirically grounded understanding of how social presence emerges through these dimensions. To begin remedying this shortcoming, we analyzed how the social presence dimensions were organized in social interaction. 
We found that the order of the dimensions depends on verbal and nonverbal communication. These findings clarify extant theory of social presence emergence in 3D virtual worlds.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Distributed collaboration is increasingly conducted in virtual worlds. Successful distributed collaboration is can benefit from social presence, the feeling of being there with others in a virtual environment. Social presence includes the dimensions of copresence, psychological involvement and behavioral engagement. Despite the importance of social presence, we currently lack empirically grounded understanding of how social presence emerges through these dimensions. To begin remedying this shortcoming, we analyzed how the social presence dimensions were organized in social interaction. We found that the order of the dimensions depends on verbal and nonverbal communication. These findings clarify extant theory of social presence emergence in 3D virtual worlds.", "fno": "5670a491", "keywords": [ "Virtual Groups", "Psychology", "Avatars", "Video Recording", "Second Life", "Three Dimensional Displays" ], "authors": [ { "affiliation": null, "fullName": "Laura Kohonen-Aho", "givenName": "Laura", "surname": "Kohonen-Aho", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Pauli Alin", "givenName": "Pauli", "surname": "Alin", "__typename": "ArticleAuthorType" } ], "idPrefix": "hicss", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "2016-01-01T00:00:00", "pubType": "proceedings", "pages": "491-500", "year": "2016", "issn": "1530-1605", "isbn": "978-0-7695-5670-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "5670a490", "articleId": "12OmNzTYC1l", "__typename": "AdjacentArticleType" }, "next": { "fno": "5670a501", "articleId": "12OmNscfI2g", "__typename": "AdjacentArticleType" }, 
"__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cw/2014/4677/0/4677a093", "title": "User Avatar Association in Virtual Worlds", "doi": null, "abstractUrl": "/proceedings-article/cw/2014/4677a093/12OmNBhHt8t", "parentPublication": { "id": "proceedings/cw/2014/4677/0", "title": "2014 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2012/4814/0/4814a107", "title": "Immersion in Virtual Worlds - But not Second Life!", "doi": null, "abstractUrl": "/proceedings-article/cw/2012/4814a107/12OmNrAv3P5", "parentPublication": { "id": "proceedings/cw/2012/4814/0", "title": "2012 International Conference on Cyberworlds", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sera/2011/1028/0/06065639", "title": "Death, Social Networks and Virtual Worlds: A Look Into the Digital Afterlife", "doi": null, "abstractUrl": "/proceedings-article/sera/2011/06065639/12OmNwKGAju", "parentPublication": { "id": "proceedings/sera/2011/1028/0", "title": "2011 9th International Conference on Software Engineering Research, Management and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pci/2009/3788/0/3788a207", "title": "Avatars' Appearance and Social Behavior in Online Virtual Worlds", "doi": null, "abstractUrl": "/proceedings-article/pci/2009/3788a207/12OmNwt5sn9", "parentPublication": { "id": "proceedings/pci/2009/3788/0", "title": "2009 13th Panhellenic Conference on Informatics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sccc/2011/4689/0/06363395", "title": "Defining and Validating Virtual Worlds Usability Heuristics", "doi": null, "abstractUrl": "/proceedings-article/sccc/2011/06363395/12OmNxXUhOF", "parentPublication": { "id": "proceedings/sccc/2011/4689/0", "title": "2011 30th 
International Conference of the Chilean Computer Science Society", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2012/4525/0/4525b003", "title": "Virtual Worlds as Collaborative Innovation and Knowledge Platform", "doi": null, "abstractUrl": "/proceedings-article/hicss/2012/4525b003/12OmNxwWovo", "parentPublication": { "id": "proceedings/hicss/2012/4525/0", "title": "2012 45th Hawaii International Conference on System Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icalt/2010/4055/0/4055a186", "title": "Comparing Social Virtual Worlds for Educational Purposes", "doi": null, "abstractUrl": "/proceedings-article/icalt/2010/4055a186/12OmNzBOimq", "parentPublication": { "id": "proceedings/icalt/2010/4055/0", "title": "Advanced Learning Technologies, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2013/4892/0/4892a873", "title": "Social Affordances for People with Lifelong Disability through Using Virtual Worlds", "doi": null, "abstractUrl": "/proceedings-article/hicss/2013/4892a873/12OmNzEmFEs", "parentPublication": { "id": "proceedings/hicss/2013/4892/0", "title": "2013 46th Hawaii International Conference on System Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2014/4677/0/4677a321", "title": "New Opportunities for Artistic Practice in Virtual Worlds", "doi": null, "abstractUrl": "/proceedings-article/cw/2014/4677a321/12OmNzahbYN", "parentPublication": { "id": "proceedings/cw/2014/4677/0", "title": "2014 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/ic/2008/01/mic2008010088", "title": "3D Social Virtual Worlds: Research Issues and Challenges", "doi": null, "abstractUrl": 
"/magazine/ic/2008/01/mic2008010088/13rRUxlgxVx", "parentPublication": { "id": "mags/ic", "title": "IEEE Internet Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNvRU0cM", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "1810084", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNxRF71o", "doi": "10.1109/ISMAR-Adjunct.2017.20", "title": "[POSTER] The Social AR Continuum: Concept and User Study", "normalizedTitle": "[POSTER] The Social AR Continuum: Concept and User Study", "abstract": "In this poster, we describe The Social AR Continuum, a space that encompasses different dimensions of Augmented Reality (AR) for sharing social experiences. We explore various dimensions, discuss options for each dimension, and brainstorm possible scenarios where these options might be useful. We describe a prototype interface using the contact placement dimension, and report on feedback from potential users which supports its usefulness for visualising social contacts. Based on this concept work, we suggest user studies in the social AR space, and give insights into future directions.", "abstracts": [ { "abstractType": "Regular", "content": "In this poster, we describe The Social AR Continuum, a space that encompasses different dimensions of Augmented Reality (AR) for sharing social experiences. We explore various dimensions, discuss options for each dimension, and brainstorm possible scenarios where these options might be useful. We describe a prototype interface using the contact placement dimension, and report on feedback from potential users which supports its usefulness for visualising social contacts. Based on this concept work, we suggest user studies in the social AR space, and give insights into future directions.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this poster, we describe The Social AR Continuum, a space that encompasses different dimensions of Augmented Reality (AR) for sharing social experiences. 
We explore various dimensions, discuss options for each dimension, and brainstorm possible scenarios where these options might be useful. We describe a prototype interface using the contact placement dimension, and report on feedback from potential users which supports its usefulness for visualising social contacts. Based on this concept work, we suggest user studies in the social AR space, and give insights into future directions.", "fno": "6327a007", "keywords": [ "Avatars", "Three Dimensional Displays", "Visualization", "Augmented Reality", "Prototypes", "Collaboration", "Face" ], "authors": [ { "affiliation": null, "fullName": "Alaeddin Nassani", "givenName": "Alaeddin", "surname": "Nassani", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Gun Lee", "givenName": "Gun", "surname": "Lee", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Mark Billinghurst", "givenName": "Mark", "surname": "Billinghurst", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Tobias Langlotz", "givenName": "Tobias", "surname": "Langlotz", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Simon Hoermann", "givenName": "Simon", "surname": "Hoermann", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Robert W. 
Lindeman", "givenName": "Robert W.", "surname": "Lindeman", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-10-01T00:00:00", "pubType": "proceedings", "pages": "7-8", "year": "2017", "issn": null, "isbn": "978-0-7695-6327-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "6327a001", "articleId": "12OmNBQ2VWK", "__typename": "AdjacentArticleType" }, "next": { "fno": "6327a009", "articleId": "12OmNCbkQCy", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2014/6184/0/06948449", "title": "[Poster] Device vs. user perspective rendering in google glass AR applications", "doi": null, "abstractUrl": "/proceedings-article/ismar/2014/06948449/12OmNCbU2XO", "parentPublication": { "id": "proceedings/ismar/2014/6184/0", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2014/6184/0/06948436", "title": "[Poster] Ongoing development of a user-centered, AR testbed in industry", "doi": null, "abstractUrl": "/proceedings-article/ismar/2014/06948436/12OmNs59JLY", "parentPublication": { "id": "proceedings/ismar/2014/6184/0", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2016/3740/0/07836459", "title": "Participatory Design of STEM Education AR Experiences for Heterogeneous Student Groups: Exploring Dimensions of Tangibility, Simulation, and Interaction", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836459/12OmNx4gUnf", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE 
International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2018/7459/0/745900a105", "title": "Does a Digital Assistant Need a Body? The Influence of Visual Embodiment and Social Behavior on the Perception of Intelligent Virtual Agents in AR", "doi": null, "abstractUrl": "/proceedings-article/ismar/2018/745900a105/17D45WXIkEy", "parentPublication": { "id": "proceedings/ismar/2018/7459/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699336", "title": "Filtering 3D Shared Surrounding Environments by Social Proximity in AR", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699336/19F1PF5yeGY", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a812", "title": "AR-HMD Multitask Viewing System Concept with a Supporting Handheld Viewport for Multiple Spatially-Anchored Workspaces", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a812/1J7WvwZew9O", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a026", "title": "The Kuroko Paradigm: The Implications of Augmenting Physical Interaction with AR Avatars", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a026/1gysn4uy67C", "parentPublication": { "id": 
"proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090604", "title": "Utilizing AR Glasses as Mobility Aid for People with Low Vision", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090604/1jIxi1ubEcg", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2020/7675/0/767500a039", "title": "Designing a Multitasking Interface for Object-aware AR applications", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a039/1pBMfjaOy08", "parentPublication": { "id": "proceedings/ismar-adjunct/2020/7675/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2020/7675/0/767500a153", "title": "AR-Chat: an AR-based instant messaging system", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a153/1pBMgRIZa6I", "parentPublication": { "id": "proceedings/ismar-adjunct/2020/7675/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKisy", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "17D45WXIkEy", "doi": "10.1109/ISMAR.2018.00039", "title": "Does a Digital Assistant Need a Body? The Influence of Visual Embodiment and Social Behavior on the Perception of Intelligent Virtual Agents in AR", "normalizedTitle": "Does a Digital Assistant Need a Body? The Influence of Visual Embodiment and Social Behavior on the Perception of Intelligent Virtual Agents in AR", "abstract": "Intelligent Virtual Agents (IVAs) are becoming part of our everyday life, thanks to artificial intelligence technology and Internet of Things devices. For example, users can control their connected home appliances through natural voice commands to the IVA. However, most current-state commercial IVAs, such as Amazon Alexa, mainly focus on voice commands and voice feedback, and lack the ability to provide non-verbal cues which are an important part of social interaction. Augmented Reality (AR) has the potential to overcome this challenge by providing a visual embodiment of the IVA. In this paper we investigate how visual embodiment and social behaviors influence the perception of the IVA. We hypothesize that a user's confidence in an IVA's ability to perform tasks is improved when imbuing the agent with a human body and social behaviors compared to the agent solely depending on voice feedback. In other words, an agent's embodied gesture and locomotion behavior exhibiting awareness of the surrounding real world or exerting influence over the environment can improve the perceived social presence with and confidence in the agent. We present a human-subject study, in which we evaluated the hypothesis and compared different forms of IVAs with speech, gesturing, and locomotion behaviors in an interactive AR scenario. 
The results show support for the hypothesis with measures of confidence, trust, and social presence. We discuss implications for future developments in the field of IVAs.", "abstracts": [ { "abstractType": "Regular", "content": "Intelligent Virtual Agents (IVAs) are becoming part of our everyday life, thanks to artificial intelligence technology and Internet of Things devices. For example, users can control their connected home appliances through natural voice commands to the IVA. However, most current-state commercial IVAs, such as Amazon Alexa, mainly focus on voice commands and voice feedback, and lack the ability to provide non-verbal cues which are an important part of social interaction. Augmented Reality (AR) has the potential to overcome this challenge by providing a visual embodiment of the IVA. In this paper we investigate how visual embodiment and social behaviors influence the perception of the IVA. We hypothesize that a user's confidence in an IVA's ability to perform tasks is improved when imbuing the agent with a human body and social behaviors compared to the agent solely depending on voice feedback. In other words, an agent's embodied gesture and locomotion behavior exhibiting awareness of the surrounding real world or exerting influence over the environment can improve the perceived social presence with and confidence in the agent. We present a human-subject study, in which we evaluated the hypothesis and compared different forms of IVAs with speech, gesturing, and locomotion behaviors in an interactive AR scenario. The results show support for the hypothesis with measures of confidence, trust, and social presence. We discuss implications for future developments in the field of IVAs.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Intelligent Virtual Agents (IVAs) are becoming part of our everyday life, thanks to artificial intelligence technology and Internet of Things devices. 
For example, users can control their connected home appliances through natural voice commands to the IVA. However, most current-state commercial IVAs, such as Amazon Alexa, mainly focus on voice commands and voice feedback, and lack the ability to provide non-verbal cues which are an important part of social interaction. Augmented Reality (AR) has the potential to overcome this challenge by providing a visual embodiment of the IVA. In this paper we investigate how visual embodiment and social behaviors influence the perception of the IVA. We hypothesize that a user's confidence in an IVA's ability to perform tasks is improved when imbuing the agent with a human body and social behaviors compared to the agent solely depending on voice feedback. In other words, an agent's embodied gesture and locomotion behavior exhibiting awareness of the surrounding real world or exerting influence over the environment can improve the perceived social presence with and confidence in the agent. We present a human-subject study, in which we evaluated the hypothesis and compared different forms of IVAs with speech, gesturing, and locomotion behaviors in an interactive AR scenario. The results show support for the hypothesis with measures of confidence, trust, and social presence. 
We discuss implications for future developments in the field of IVAs.", "fno": "745900a105", "keywords": [ "Augmented Reality", "Software Agents", "Intelligent Virtual Agents", "Social Behaviors", "IVA", "AR", "Amazon Alexa", "Human Subject Study", "Embodied Gesture", "Social Interaction", "Voice Feedback", "Voice Commands", "Artificial Intelligence Technology", "Social Behavior", "Visual Embodiment", "Locomotion Behaviors", "Perceived Social Presence", "Locomotion Behavior", "Visualization", "Internet Of Things", "Robots", "Avatars", "Augmented Reality", "Face", "Atmospheric Measurements", "Intelligent Virtual Agents", "Digital Assistants", "Social Interaction", "Presence", "Confidence", "Trust In Technology", "Augmented Reality" ], "authors": [ { "affiliation": null, "fullName": "Kangsoo Kim", "givenName": "Kangsoo", "surname": "Kim", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Luke Boelling", "givenName": "Luke", "surname": "Boelling", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Steffen Haesler", "givenName": "Steffen", "surname": "Haesler", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jeremy Bailenson", "givenName": "Jeremy", "surname": "Bailenson", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Gerd Bruder", "givenName": "Gerd", "surname": "Bruder", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Greg F. 
Welch", "givenName": "Greg F.", "surname": "Welch", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-10-01T00:00:00", "pubType": "proceedings", "pages": "105-114", "year": "2018", "issn": "1554-7868", "isbn": "978-1-5386-7459-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "745900a098", "articleId": "17D45WYQJag", "__typename": "AdjacentArticleType" }, "next": { "fno": "745900a115", "articleId": "17D45WK5AlG", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "trans/tg/2013/04/ttg2013040591", "title": "An Evaluation of Self-Avatar Eye Movement for Virtual Embodiment", "doi": null, "abstractUrl": "/journal/tg/2013/04/ttg2013040591/13rRUyYBlgz", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699187", "title": "Seeing is Believing: Improving the Perceived Trust in Visually Embodied Alexa in Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699187/19F1Qnkq8Ao", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a730", "title": "Third-Person Perspective Avatar Embodiment in Augmented Reality: Examining the Proteus Effect on Physical Performance", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a730/1CJffY1QgeI", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User 
Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a772", "title": "Embodiment of an Avatar with Unnatural Arm Movements", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a772/1J7W9fEjd6g", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a278", "title": "Enhancing the Sense of Agency by Transitional Weight Control in Virtual Co-Embodiment", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a278/1JrRf7PXOTK", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/10109126", "title": "Measuring Embodiment: Movement Complexity and the Impact of Personal Characteristics", "doi": null, "abstractUrl": "/journal/tg/5555/01/10109126/1METe7DRIic", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2019/5604/0/560400a017", "title": "Effects of Patient Care Assistant Embodiment and Computer Mediation on User Experience", "doi": null, "abstractUrl": "/proceedings-article/aivr/2019/560400a017/1grOj751HBS", "parentPublication": { "id": "proceedings/aivr/2019/5604/0", "title": "2019 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089596", "title": "Reducing Task Load 
with an Embodied Intelligent Virtual Assistant for Improved Performance in Collaborative Decision Making", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089596/1jIx7ELvYVa", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090457", "title": "Affective Embodiment: The effect of avatar appearance and posture representation on emotions in VR", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090457/1jIxjXwO4HS", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a054", "title": "The Effects of Body Tracking Fidelity on Embodiment of an Inverse-Kinematic Avatar for Male Participants", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a054/1pyswgi4b7y", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1MNgk3BHlS0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2023", "__typename": "ProceedingType" }, "article": { "id": "1MNgYjAysYU", "doi": "10.1109/VR55154.2023.00070", "title": "Persuasive Vibrations: Effects of Speech-Based Vibrations on Persuasion, Leadership, and Co-Presence During Verbal Communication in VR", "normalizedTitle": "Persuasive Vibrations: Effects of Speech-Based Vibrations on Persuasion, Leadership, and Co-Presence During Verbal Communication in VR", "abstract": "In Virtual Reality (VR), a growing number of applications involve verbal communications with avatars, such as for teleconference, entertainment, virtual training, social networks, etc. In this context, our paper aims to investigate how tactile feedback consisting in vibrations synchronized with speech could influence aspects related to VR social interactions such as persuasion, co-presence and leadership. We conducted two experiments where participants embody a first-person avatar attending a virtual meeting in immersive VR. In the first experiment, participants were listening to two speaking virtual agents and the speech of one agent was augmented with vibrotactile feedback. Interestingly, the results show that such vibrotactile feedback could significantly improve the perceived co-presence but also the persuasiveness and leadership of the haptically-augmented agent. In the second experiment, the participants were asked to speak to two agents, and their own speech was augmented or not with vibrotactile feedback. The results show that vibrotactile feedback had again a positive effect on co-presence, and that participants perceive their speech as more persuasive in presence of haptic feedback. 
Taken together, our results demonstrate the strong potential of haptic feedback for supporting social interactions in VR, and pave the way to novel usages of vibrations in a wide range of applications in which verbal communication plays a prominent role.", "abstracts": [ { "abstractType": "Regular", "content": "In Virtual Reality (VR), a growing number of applications involve verbal communications with avatars, such as for teleconference, entertainment, virtual training, social networks, etc. In this context, our paper aims to investigate how tactile feedback consisting in vibrations synchronized with speech could influence aspects related to VR social interactions such as persuasion, co-presence and leadership. We conducted two experiments where participants embody a first-person avatar attending a virtual meeting in immersive VR. In the first experiment, participants were listening to two speaking virtual agents and the speech of one agent was augmented with vibrotactile feedback. Interestingly, the results show that such vibrotactile feedback could significantly improve the perceived co-presence but also the persuasiveness and leadership of the haptically-augmented agent. In the second experiment, the participants were asked to speak to two agents, and their own speech was augmented or not with vibrotactile feedback. The results show that vibrotactile feedback had again a positive effect on co-presence, and that participants perceive their speech as more persuasive in presence of haptic feedback. 
Taken together, our results demonstrate the strong potential of haptic feedback for supporting social interactions in VR, and pave the way to novel usages of vibrations in a wide range of applications in which verbal communication plays a prominent role.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In Virtual Reality (VR), a growing number of applications involve verbal communications with avatars, such as for teleconference, entertainment, virtual training, social networks, etc. In this context, our paper aims to investigate how tactile feedback consisting in vibrations synchronized with speech could influence aspects related to VR social interactions such as persuasion, co-presence and leadership. We conducted two experiments where participants embody a first-person avatar attending a virtual meeting in immersive VR. In the first experiment, participants were listening to two speaking virtual agents and the speech of one agent was augmented with vibrotactile feedback. Interestingly, the results show that such vibrotactile feedback could significantly improve the perceived co-presence but also the persuasiveness and leadership of the haptically-augmented agent. In the second experiment, the participants were asked to speak to two agents, and their own speech was augmented or not with vibrotactile feedback. The results show that vibrotactile feedback had again a positive effect on co-presence, and that participants perceive their speech as more persuasive in presence of haptic feedback. 
Taken together, our results demonstrate the strong potential of haptic feedback for supporting social interactions in VR, and pave the way to novel usages of vibrations in a wide range of applications in which verbal communication plays a prominent role.", "fno": "481500a552", "keywords": [ "Vibrations", "Training", "Leadership", "Three Dimensional Displays", "Avatars", "Tactile Sensors", "User Interfaces", "Audio", "Haptic", "Vibrotactile Feedback", "Speech", "Co Presence", "Leadership", "Persuasion" ], "authors": [ { "affiliation": "Inria Rennes", "fullName": "Justine Saint-Aubert", "givenName": "Justine", "surname": "Saint-Aubert", "__typename": "ArticleAuthorType" }, { "affiliation": "Inria Rennes", "fullName": "Ferran Argelaguet", "givenName": "Ferran", "surname": "Argelaguet", "__typename": "ArticleAuthorType" }, { "affiliation": "Inria Rennes", "fullName": "Marc Macé", "givenName": "Marc", "surname": "Macé", "__typename": "ArticleAuthorType" }, { "affiliation": "Inria Rennes", "fullName": "Claudio Pacchierotti", "givenName": "Claudio", "surname": "Pacchierotti", "__typename": "ArticleAuthorType" }, { "affiliation": "Reichman University,Israel", "fullName": "Amir Amedi", "givenName": "Amir", "surname": "Amedi", "__typename": "ArticleAuthorType" }, { "affiliation": "Inria Rennes", "fullName": "Anatole Lécuyer", "givenName": "Anatole", "surname": "Lécuyer", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2023-03-01T00:00:00", "pubType": "proceedings", "pages": "552-560", "year": "2023", "issn": null, "isbn": "979-8-3503-4815-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1MNgY5TPQ40", "name": "pvr202348150-010108422s1-mm_481500a552.zip", "size": "54.8 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pvr202348150-010108422s1-mm_481500a552.zip", "__typename": "WebExtraType" } ], 
"adjacentArticles": { "previous": { "fno": "481500a541", "articleId": "1MNgMgQsPjW", "__typename": "AdjacentArticleType" }, "next": { "fno": "481500a561", "articleId": "1MNgq5zE1BS", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/acii/2017/0563/0/08273612", "title": "Emotional responses of vibrotactile-thermal stimuli: Effects of constant-temperature thermal stimuli", "doi": null, "abstractUrl": "/proceedings-article/acii/2017/08273612/12OmNqMPfQu", "parentPublication": { "id": "proceedings/acii/2017/0563/0", "title": "2017 Seventh International Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892375", "title": "Experiencing guidance in 3D spaces with a vibrotactile head-mounted display", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892375/12OmNy5hRo2", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isvri/2011/0054/0/05759662", "title": "Pseudo-haptic feedback augmented with visual and tactile vibrations", "doi": null, "abstractUrl": "/proceedings-article/isvri/2011/05759662/12OmNzvz6OE", "parentPublication": { "id": "proceedings/isvri/2011/0054/0", "title": "2011 IEEE International Symposium on VR Innovation (ISVRI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446177", "title": "You Shall Not Pass: Non-Intrusive Feedback for Virtual Walls in VR Environments with Room-Scale Mapping", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446177/13bd1eSlyu1", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2013/01/tth2013010013", "title": "Comparison of Visual and Vibrotactile Feedback Methods for Seated Posture Guidance", "doi": null, "abstractUrl": "/journal/th/2013/01/tth2013010013/13rRUxcKzVp", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09911682", "title": "Effect of Vibrations on Impression of Walking and Embodiment With First- and Third-Person Avatar", "doi": null, "abstractUrl": "/journal/tg/5555/01/09911682/1HeiWQWKlTG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a001", "title": "Estimating the Just Noticeable Difference of Tactile Feedback in Oculus Quest 2 Controllers", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a001/1JrRdMd6OZi", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797956", "title": "Haptic Compass: Active Vibrotactile Feedback of Physical Object for Path Guidance", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797956/1cJ17BLEK88", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2019/4752/0/09212865", "title": "Identification of Vibrotactile Flow Patterns on a Handheld Haptic Device", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2019/09212865/1nHRQWVTfMc", "parentPublication": { 
"id": "proceedings/icvrv/2019/4752/0", "title": "2019 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a350", "title": "Investigating Remote Tactile Feedback for Mid-Air Text-Entry in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a350/1pysyvL4CwU", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1gyshXRzHpK", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "1810084", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1gysnLO51K0", "doi": "10.1109/ISMAR-Adjunct.2019.00019", "title": "Filtering Mechanisms of Shared Social Surrounding Environments in AR", "normalizedTitle": "Filtering Mechanisms of Shared Social Surrounding Environments in AR", "abstract": "This work describes a system and a user study for hiding and showing parts of the shared surrounding spaces in social AR applications for wearable devices. It extends previous work on the Social AR Continuum by exploring how sharing the surrounding environment can vary based on the social proximity between contacts. We built a prototype system for sharing the surrounding environment between two HoloLens devices. We found that Remove is the preferred hiding mechanism for the sharer in terms of social presence. We discuss the research findings and outline future directions for research in sharing surrounding spaces on social AR applications for wearable devices.", "abstracts": [ { "abstractType": "Regular", "content": "This work describes a system and a user study for hiding and showing parts of the shared surrounding spaces in social AR applications for wearable devices. It extends previous work on the Social AR Continuum by exploring how sharing the surrounding environment can vary based on the social proximity between contacts. We built a prototype system for sharing the surrounding environment between two HoloLens devices. We found that Remove is the preferred hiding mechanism for the sharer in terms of social presence. 
We discuss the research findings and outline future directions for research in sharing surrounding spaces on social AR applications for wearable devices.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This work describes a system and a user study for hiding and showing parts of the shared surrounding spaces in social AR applications for wearable devices. It extends previous work on the Social AR Continuum by exploring how sharing the surrounding environment can vary based on the social proximity between contacts. We built a prototype system for sharing the surrounding environment between two HoloLens devices. We found that Remove is the preferred hiding mechanism for the sharer in terms of social presence. We discuss the research findings and outline future directions for research in sharing surrounding spaces on social AR applications for wearable devices.", "fno": "476500a017", "keywords": [ "Augmented Reality", "Behavioural Sciences Computing", "Helmet Mounted Displays", "Wearable Devices", "Filtering Mechanisms", "Shared Surrounding Spaces", "Social AR Applications", "Social AR Continuum", "Social Proximity", "Prototype System", "Holo Lens Devices", "Hiding Mechanism", "Social Presence", "Three Dimensional Displays", "Prototypes", "Collaboration", "Avatars", "Social Networking Online", "Privacy", "Semantics", "Augmented Reality", "Virtual Avatars", "Social Computing" ], "authors": [ { "affiliation": "HIT Lab NZ", "fullName": "Alaeddin Nassani", "givenName": "Alaeddin", "surname": "Nassani", "__typename": "ArticleAuthorType" }, { "affiliation": "University of South Australia", "fullName": "Gun Lee", "givenName": "Gun", "surname": "Lee", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Auckland", "fullName": "Mark Billinghurst", "givenName": "Mark", "surname": "Billinghurst", "__typename": "ArticleAuthorType" }, { "affiliation": "HIT Lab NZ", "fullName": "Robert W. 
Lindeman", "givenName": "Robert W.", "surname": "Lindeman", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-10-01T00:00:00", "pubType": "proceedings", "pages": "17-19", "year": "2019", "issn": null, "isbn": "978-1-7281-4765-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "476500a012", "articleId": "1gysjF4vMuk", "__typename": "AdjacentArticleType" }, "next": { "fno": "476500a020", "articleId": "1gysji9xPlm", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismarw/2015/8471/0/8471a026", "title": "AR Marker Hiding with Real-Time Texture Deformation", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2015/8471a026/12OmNAXglMY", "parentPublication": { "id": "proceedings/ismarw/2015/8471/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality Workshops (ISMARW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504726", "title": "Redirected head gaze to support AR meetings distributed over heterogeneous environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504726/12OmNBOCWvM", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2017/6327/0/6327a007", "title": "[POSTER] The Social AR Continuum: Concept and User Study", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a007/12OmNxRF71o", "parentPublication": { "id": "proceedings/ismar-adjunct/2017/6327/0", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2015/8471/0/8471a015", "title": "Maintaining and Enhancing Human-Surrogate Presence in Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2015/8471a015/12OmNxvO05R", "parentPublication": { "id": "proceedings/ismarw/2015/8471/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality Workshops (ISMARW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2018/7459/0/745900a105", "title": "Does a Digital Assistant Need a Body? The Influence of Visual Embodiment and Social Behavior on the Perception of Intelligent Virtual Agents in AR", "doi": null, "abstractUrl": "/proceedings-article/ismar/2018/745900a105/17D45WXIkEy", "parentPublication": { "id": "proceedings/ismar/2018/7459/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699336", "title": "Filtering 3D Shared Surrounding Environments by Social Proximity in AR", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699336/19F1PF5yeGY", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a666", "title": "Investigating User Embodiment of Inverse-Kinematic Avatars in Smartphone Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a666/1JrR5i5jDhe", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/vr/2019/1377/0/08798100", "title": "Towards a Framework on Accessible and Social VR in Education", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798100/1cJ16Rutlm0", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iscc/2020/8086/0/09219617", "title": "Decentralized Social Media Applications as a Service: a Car-Sharing Perspective", "doi": null, "abstractUrl": "/proceedings-article/iscc/2020/09219617/1nRPq0wT2jm", "parentPublication": { "id": "proceedings/iscc/2020/8086/0", "title": "2020 IEEE Symposium on Computers and Communications (ISCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2020/7675/0/767500a153", "title": "AR-Chat: an AR-based instant messaging system", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a153/1pBMgRIZa6I", "parentPublication": { "id": "proceedings/ismar-adjunct/2020/7675/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1pystLSz19C", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1pyswcPbPEI", "doi": "10.1109/ISMAR50242.2020.00020", "title": "Generating Emotive Gaits for Virtual Agents Using Affect-Based Autoregression", "normalizedTitle": "Generating Emotive Gaits for Virtual Agents Using Affect-Based Autoregression", "abstract": "We present a novel autoregression network to generate virtual agents that convey various emotions through their walking styles or gaits. Given the 3D pose sequences of a gait, our network extracts pertinent movement features and affective features from the gait. We use these features to synthesize subsequent gaits such that the virtual agents can express and transition between emotions represented as combinations of happy, sad, angry, and neutral. We incorporate multiple regularizations in the training of our network to simultaneously enforce plausible movements and noticeable emotions on the virtual agents. We also integrate our approach with an AR environment using a Microsoft HoloLens and can generate emotive gaits at interactive rates to increase the social presence. We evaluate how human observers perceive both the naturalness and the emotions from the generated gaits of the virtual agents in a web-based study. Our results indicate around 89% of the users found the naturalness of the gaits satisfactory on a five-point Likert scale, and the emotions they perceived from the virtual agents are statistically similar to the intended emotions of the virtual agents. We also use our network to augment existing gait datasets with emotive gaits and will release this augmented dataset for future research in emotion prediction and emotive gait synthesis. 
Our project website is available at https://gamma.umd.edu/gen-emotive-gaits/.", "abstracts": [ { "abstractType": "Regular", "content": "We present a novel autoregression network to generate virtual agents that convey various emotions through their walking styles or gaits. Given the 3D pose sequences of a gait, our network extracts pertinent movement features and affective features from the gait. We use these features to synthesize subsequent gaits such that the virtual agents can express and transition between emotions represented as combinations of happy, sad, angry, and neutral. We incorporate multiple regularizations in the training of our network to simultaneously enforce plausible movements and noticeable emotions on the virtual agents. We also integrate our approach with an AR environment using a Microsoft HoloLens and can generate emotive gaits at interactive rates to increase the social presence. We evaluate how human observers perceive both the naturalness and the emotions from the generated gaits of the virtual agents in a web-based study. Our results indicate around 89% of the users found the naturalness of the gaits satisfactory on a five-point Likert scale, and the emotions they perceived from the virtual agents are statistically similar to the intended emotions of the virtual agents. We also use our network to augment existing gait datasets with emotive gaits and will release this augmented dataset for future research in emotion prediction and emotive gait synthesis. Our project website is available at https://gamma.umd.edu/gen-emotive-gaits/.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a novel autoregression network to generate virtual agents that convey various emotions through their walking styles or gaits. Given the 3D pose sequences of a gait, our network extracts pertinent movement features and affective features from the gait. 
We use these features to synthesize subsequent gaits such that the virtual agents can express and transition between emotions represented as combinations of happy, sad, angry, and neutral. We incorporate multiple regularizations in the training of our network to simultaneously enforce plausible movements and noticeable emotions on the virtual agents. We also integrate our approach with an AR environment using a Microsoft HoloLens and can generate emotive gaits at interactive rates to increase the social presence. We evaluate how human observers perceive both the naturalness and the emotions from the generated gaits of the virtual agents in a web-based study. Our results indicate around 89% of the users found the naturalness of the gaits satisfactory on a five-point Likert scale, and the emotions they perceived from the virtual agents are statistically similar to the intended emotions of the virtual agents. We also use our network to augment existing gait datasets with emotive gaits and will release this augmented dataset for future research in emotion prediction and emotive gait synthesis. 
Our project website is available at https://gamma.umd.edu/gen-emotive-gaits/.", "fno": "850800a024", "keywords": [ "Augmented Reality", "Autoregressive Processes", "Emotion Recognition", "Feature Extraction", "Gait Analysis", "Image Motion Analysis", "Learning Artificial Intelligence", "Neural Nets", "Pose Estimation", "Virtual Agents", "Emotive Gait Synthesis", "Pertinent Movement Feature Extraction", "Affect Based Autoregression", "3 D Pose Sequences", "AR Environment", "Microsoft Holo Lens", "Five Point Likert Scale", "Training", "Legged Locomotion", "Three Dimensional Displays", "Observers", "Feature Extraction", "Augmented Reality", "Human Centered Computing", "Human Computer Interaction HCI", "Interaction Paradigms Mixed Augmented Reality", "Computing Methodologies", "Machine Learning", "Machine Learning Approaches", "Neural Networks" ], "authors": [ { "affiliation": "UMD College Park,USA", "fullName": "Uttaran Bhattacharya", "givenName": "Uttaran", "surname": "Bhattacharya", "__typename": "ArticleAuthorType" }, { "affiliation": "UMD College Park,USA", "fullName": "Nicholas Rewkowski", "givenName": "Nicholas", "surname": "Rewkowski", "__typename": "ArticleAuthorType" }, { "affiliation": "UMD College Park,USA", "fullName": "Pooja Guhan", "givenName": "Pooja", "surname": "Guhan", "__typename": "ArticleAuthorType" }, { "affiliation": "UMD College Park,USA", "fullName": "Niall L. 
Williams", "givenName": "Niall L.", "surname": "Williams", "__typename": "ArticleAuthorType" }, { "affiliation": "UMD College Park,USA", "fullName": "Trisha Mittal", "givenName": "Trisha", "surname": "Mittal", "__typename": "ArticleAuthorType" }, { "affiliation": "UMD College Park,USA", "fullName": "Aniket Bera", "givenName": "Aniket", "surname": "Bera", "__typename": "ArticleAuthorType" }, { "affiliation": "UMD College Park,USA", "fullName": "Dinesh Manocha", "givenName": "Dinesh", "surname": "Manocha", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-11-01T00:00:00", "pubType": "proceedings", "pages": "24-35", "year": "2020", "issn": "1554-7868", "isbn": "978-1-7281-8508-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "850800a012", "articleId": "1pysvDRGQq4", "__typename": "AdjacentArticleType" }, "next": { "fno": "850800a036", "articleId": "1pysuNxxkOI", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/robot/1991/2163/0/00131938", "title": "Turning gait of a quadrupedal walking machine", "doi": null, "abstractUrl": "/proceedings-article/robot/1991/00131938/12OmNqEAT3E", "parentPublication": { "id": "proceedings/robot/1991/2163/0", "title": "Proceedings. 
1991 IEEE International Conference on Robotics and Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/med/2006/1/0/04124876", "title": "Sample-Based HZD Control for Robustness and Slope Invariance of Planar Passive Bipedal Gaits", "doi": null, "abstractUrl": "/proceedings-article/med/2006/04124876/12OmNwoghcW", "parentPublication": { "id": "proceedings/med/2006/1/0", "title": "Proceedings of the 14th Mediterranean Conference on Control and Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2013/5048/0/5048a460", "title": "Perception of Emotional Gaits Using Avatar Animation of Real and Artificially Synthesized Gaits", "doi": null, "abstractUrl": "/proceedings-article/acii/2013/5048a460/12OmNzWx07H", "parentPublication": { "id": "proceedings/acii/2013/5048/0", "title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icis/2016/0806/0/07550922", "title": "Gaits classification of normal vs. 
patients by wireless gait sensor and Support Vector Machine (SVM) classifier", "doi": null, "abstractUrl": "/proceedings-article/icis/2016/07550922/12OmNzwpUnJ", "parentPublication": { "id": "proceedings/icis/2016/0806/0", "title": "2016 IEEE/ACIS 15th International Conference on Computer and Information Science (ICIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2018/04/07778190", "title": "Identifying Emotions from Non-Contact Gaits Information Based on Microsoft Kinects", "doi": null, "abstractUrl": "/journal/ta/2018/04/07778190/17D45XfSETv", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/irc/2019/9245/0/924500a612", "title": "A Generic Decentralized Gait Generator Architecture for Statically Stable Motion of Crawling Robots", "doi": null, "abstractUrl": "/proceedings-article/irc/2019/924500a612/18M7hWcDDP2", "parentPublication": { "id": "proceedings/irc/2019/9245/0", "title": "2019 Third IEEE International Conference on Robotic Computing (IRC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aciiw/2022/5490/0/10086022", "title": "Emotion Recognition from Non-Straight Walking Gaits Induced by Emotional Videos", "doi": null, "abstractUrl": "/proceedings-article/aciiw/2022/10086022/1M668zKSeFa", "parentPublication": { "id": "proceedings/aciiw/2022/5490/0", "title": "2022 10th International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/06/08907402", "title": "Modeling Data-Driven Dominance Traits for Virtual Characters Using Gait Analysis", "doi": null, "abstractUrl": "/journal/tg/2021/06/08907402/1f75TiiWgik", "parentPublication": { "id": "trans/tg", "title": "IEEE 
Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a395", "title": "Learning Perceived Emotion Using Affective and Deep Features for Mental Health Applications", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a395/1gyskQ3YBeU", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a160", "title": "Text2Gestures: A Transformer-Based Network for Generating Emotive Body Gestures for Virtual Agents", "doi": null, "abstractUrl": "/proceedings-article/vr/2021/255600a160/1tuASeZqmc0", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "13bd1eJgoia", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "13bd1eSlytz", "doi": "10.1109/VR.2018.8446221", "title": "Simulator Sick but Still Immersed: A Comparison of Head-Object Collision Handling and Their Impact on Fun, Immersion, and Simulator Sickness", "normalizedTitle": "Simulator Sick but Still Immersed: A Comparison of Head-Object Collision Handling and Their Impact on Fun, Immersion, and Simulator Sickness", "abstract": "We compared three techniques for handling head-object collisions in room-scale virtual reality (VR). We developed a game whose mechanics induce such collisions which we either addressed (1) not at all, (2) by fading the screen information to black, or (3) by restricting translation, i.e. correcting the virtual offset in such a way that no penetration occurred. We measured these conditions' impact on simulator sickness, fun, and immersion perception. We found that the translation-restricted method yielded the greatest immersion value but also contributed the most to simulator sickness.", "abstracts": [ { "abstractType": "Regular", "content": "We compared three techniques for handling head-object collisions in room-scale virtual reality (VR). We developed a game whose mechanics induce such collisions which we either addressed (1) not at all, (2) by fading the screen information to black, or (3) by restricting translation, i.e. correcting the virtual offset in such a way that no penetration occurred. We measured these conditions' impact on simulator sickness, fun, and immersion perception. 
We found that the translation-restricted method yielded the greatest immersion value but also contributed the most to simulator sickness.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We compared three techniques for handling head-object collisions in room-scale virtual reality (VR). We developed a game whose mechanics induce such collisions which we either addressed (1) not at all, (2) by fading the screen information to black, or (3) by restricting translation, i.e. correcting the virtual offset in such a way that no penetration occurred. We measured these conditions' impact on simulator sickness, fun, and immersion perception. We found that the translation-restricted method yielded the greatest immersion value but also contributed the most to simulator sickness.", "fno": "08446221", "keywords": [ "Computer Games", "Digital Simulation", "Human Computer Interaction", "Virtual Reality", "Simulator Sickness", "Head Object Collision Handling", "Room Scale Virtual Reality", "Immersion Perception", "Game", "Translation Restricted Method", "Immersion Value", "Games", "Visualization", "Time Measurement", "Atmospheric Measurements", "Particle Measurements", "Virtual Reality", "Collision Avoidance", "Human Centered Computing Virtual Reality", "Human Centered Computing Empirical Studies In Visualization" ], "authors": [ { "affiliation": "Games Engineering", "fullName": "Peter Ziegler", "givenName": "Peter", "surname": "Ziegler", "__typename": "ArticleAuthorType" }, { "affiliation": "Games Engineering", "fullName": "Daniel Roth", "givenName": "Daniel", "surname": "Roth", "__typename": "ArticleAuthorType" }, { "affiliation": "Games Engineering", "fullName": "Andreas Knots", "givenName": "Andreas", "surname": "Knots", "__typename": "ArticleAuthorType" }, { "affiliation": "Games Engineering", "fullName": "Michael Kreuzer", "givenName": "Michael", "surname": "Kreuzer", "__typename": "ArticleAuthorType" }, { "affiliation": "Games Engineering", "fullName": 
"Sebastian von Mammen", "givenName": "Sebastian", "surname": "von Mammen", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-03-01T00:00:00", "pubType": "proceedings", "pages": "743-744", "year": "2018", "issn": null, "isbn": "978-1-5386-3365-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08446581", "articleId": "13bd1fKQxrI", "__typename": "AdjacentArticleType" }, "next": { "fno": "08446151", "articleId": "13bd1fWcuDe", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2009/3943/0/04811037", "title": "Real Walking Increases Simulator Sickness in Navigationally Complex Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2009/04811037/12OmNAoDilQ", "parentPublication": { "id": "proceedings/vr/2009/3943/0", "title": "2009 IEEE Virtual Reality Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2002/1492/0/14920164", "title": "Effects of Field of View on Presence, Enjoyment, Memory, and Simulator Sickness in a Virtual Environment", "doi": null, "abstractUrl": "/proceedings-article/vr/2002/14920164/12OmNvUsoqB", "parentPublication": { "id": "proceedings/vr/2002/1492/0", "title": "Proceedings IEEE Virtual Reality 2002", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2006/0224/0/02240097", "title": "Demand Characteristics of a Questionnaire Used to Assess Motion Sickness in a Virtual Environment", "doi": null, "abstractUrl": "/proceedings-article/vr/2006/02240097/12OmNzAFSYS", "parentPublication": { "id": "proceedings/vr/2006/0224/0", "title": "IEEE Virtual Reality Conference (VR 2006)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "trans/tg/2007/03/v0422", "title": "Demand Characteristics in Assessing Motion Sickness in a Virtual Environment: Or Does Taking a Motion Sickness Questionnaire Make You Sick?", "doi": null, "abstractUrl": "/journal/tg/2007/03/v0422/13rRUxASuht", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vs-games/2018/7123/0/08493408", "title": "A Virtual Nose as a Rest-Frame - The Impact on Simulator Sickness and Game Experience", "doi": null, "abstractUrl": "/proceedings-article/vs-games/2018/08493408/14tNJpOSCm4", "parentPublication": { "id": "proceedings/vs-games/2018/7123/0", "title": "2018 10th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798291", "title": "Assessing Media QoE, Simulator Sickness and Presence for Omnidirectional Videos with Different Test Protocols", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798291/1cJ0GMB2sV2", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/11/08798880", "title": "Sick Moves! 
Motion Parameters as Indicators of Simulator Sickness", "doi": null, "abstractUrl": "/journal/tg/2019/11/08798880/1cumZbd4qNG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090573", "title": "On the Usage of the Simulator Sickness Questionnaire for Virtual Reality Research", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090573/1jIxobU7Q3e", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090670", "title": "SiSiMo: Towards Simulator Sickness Modeling for 360<sup>&#x00B0;</sup> Videos Viewed with an HMD", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090670/1jIxwAw9Z9C", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/02/09551731", "title": "Learning from Deep Stereoscopic Attention for Simulator Sickness Prediction", "doi": null, "abstractUrl": "/journal/tg/2023/02/09551731/1xgx3DIeexq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1MNgk3BHlS0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2023", "__typename": "ProceedingType" }, "article": { "id": "1MNgWtYsR5S", "doi": "10.1109/VR55154.2023.00025", "title": "An EEG-based Experiment on VR Sickness and Postural Instability While Walking in Virtual Environments", "normalizedTitle": "An EEG-based Experiment on VR Sickness and Postural Instability While Walking in Virtual Environments", "abstract": "Previous studies showed that natural walking reduces the susceptibility to VR sickness. However, many users still experience VR sickness when wearing VR headsets that allow free walking in room-scale spaces. This paper studies VR sickness and postural instability while the user walks in an immersive virtual environment using an electroencephalogram (EEG) headset and a full-body motion capture system. The experiment induced VR sickness by gradually increasing the translation gain beyond the user&#x0027;s detection threshold. A between-group comparison between participants with and without VR sickness symptoms found some significant differences in postural stability but found none on gait patterns during the walking. In the EEG analysis, the group with VR sickness showed a reduction of alpha power, a phenomenon previously linked to a higher workload and efforts to maintain postural control. In contrast, the group without VR sickness exhibited brain activities linked to fine cognitive-motor control. The EEG result provides new insights into the postural instability theory: participants with VR sickness could maintain their postural stability at the cost of a higher cognitive workload. 
Our result also indicates that the analysis of lower-frequency power could complement behavioral data for continuous VR sickness detection in both stationary and mobile VR setups.", "abstracts": [ { "abstractType": "Regular", "content": "Previous studies showed that natural walking reduces the susceptibility to VR sickness. However, many users still experience VR sickness when wearing VR headsets that allow free walking in room-scale spaces. This paper studies VR sickness and postural instability while the user walks in an immersive virtual environment using an electroencephalogram (EEG) headset and a full-body motion capture system. The experiment induced VR sickness by gradually increasing the translation gain beyond the user&#x0027;s detection threshold. A between-group comparison between participants with and without VR sickness symptoms found some significant differences in postural stability but found none on gait patterns during the walking. In the EEG analysis, the group with VR sickness showed a reduction of alpha power, a phenomenon previously linked to a higher workload and efforts to maintain postural control. In contrast, the group without VR sickness exhibited brain activities linked to fine cognitive-motor control. The EEG result provides new insights into the postural instability theory: participants with VR sickness could maintain their postural stability at the cost of a higher cognitive workload. Our result also indicates that the analysis of lower-frequency power could complement behavioral data for continuous VR sickness detection in both stationary and mobile VR setups.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Previous studies showed that natural walking reduces the susceptibility to VR sickness. However, many users still experience VR sickness when wearing VR headsets that allow free walking in room-scale spaces. 
This paper studies VR sickness and postural instability while the user walks in an immersive virtual environment using an electroencephalogram (EEG) headset and a full-body motion capture system. The experiment induced VR sickness by gradually increasing the translation gain beyond the user's detection threshold. A between-group comparison between participants with and without VR sickness symptoms found some significant differences in postural stability but found none on gait patterns during the walking. In the EEG analysis, the group with VR sickness showed a reduction of alpha power, a phenomenon previously linked to a higher workload and efforts to maintain postural control. In contrast, the group without VR sickness exhibited brain activities linked to fine cognitive-motor control. The EEG result provides new insights into the postural instability theory: participants with VR sickness could maintain their postural stability at the cost of a higher cognitive workload. Our result also indicates that the analysis of lower-frequency power could complement behavioral data for continuous VR sickness detection in both stationary and mobile VR setups.", "fno": "481500a094", "keywords": [ "Legged Locomotion", "Headphones", "Three Dimensional Displays", "Costs", "Virtual Environments", "User Interfaces", "Electroencephalography" ], "authors": [ { "affiliation": "iCinema Centre, University of New South Wales", "fullName": "Carlos Alfredo Tirado Cortes", "givenName": "Carlos Alfredo Tirado", "surname": "Cortes", "__typename": "ArticleAuthorType" }, { "affiliation": "Australian AI Institute, GrapheneX-UTS Human-centric AI Centre, University of Technology,Sydney", "fullName": "Chin-Teng Lin", "givenName": "Chin-Teng", "surname": "Lin", "__typename": "ArticleAuthorType" }, { "affiliation": "Australian AI Institute, GrapheneX-UTS Human-centric AI Centre, University of Technology,Sydney", "fullName": "Tien-Thong Nguyen Do", "givenName": "Tien-Thong Nguyen", "surname": "Do", 
"__typename": "ArticleAuthorType" }, { "affiliation": "School of Computer and Mathematical Sciences, University of Adelaide", "fullName": "Hsiang-Ting Chen", "givenName": "Hsiang-Ting", "surname": "Chen", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2023-03-01T00:00:00", "pubType": "proceedings", "pages": "94-104", "year": "2023", "issn": null, "isbn": "979-8-3503-4815-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1MNgW4mALsY", "name": "pvr202348150-010108449s1-mm_481500a094.zip", "size": "330 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pvr202348150-010108449s1-mm_481500a094.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "481500a083", "articleId": "1MNgRmjl6Zq", "__typename": "AdjacentArticleType" }, "next": { "fno": "481500a105", "articleId": "1MNgCnmbXyM", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2009/3943/0/04811037", "title": "Real Walking Increases Simulator Sickness in Navigationally Complex Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2009/04811037/12OmNAoDilQ", "parentPublication": { "id": "proceedings/vr/2009/3943/0", "title": "2009 IEEE Virtual Reality Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446130", "title": "Rapid, Continuous Movement Between Nodes as an Accessible Virtual Reality Locomotion Technique", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446130/13bd1f3HvEx", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/sive/2018/5713/0/08577177", "title": "Influence of hearing your steps and environmental sounds in VR while walking", "doi": null, "abstractUrl": "/proceedings-article/sive/2018/08577177/17D45XoXP3w", "parentPublication": { "id": "proceedings/sive/2018/5713/0", "title": "2018 IEEE 4th VR Workshop on Sonic Interactions for Virtual Environments (SIVE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797999", "title": "VR system to simulate tightrope walking with a standalone VR headset and slack rails", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797999/1cJ0Nqr10CA", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798213", "title": "VR Sickness Prediction for Navigation in Immersive Virtual Environments using a Deep Long Short Term Memory Model", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798213/1cJ0RYruJIA", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798158", "title": "PhantomLegs: Reducing Virtual Reality Sickness Using Head-Worn Haptic Devices", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798158/1cJ16zT3GdW", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/11/08798880", "title": "Sick Moves! 
Motion Parameters as Indicators of Simulator Sickness", "doi": null, "abstractUrl": "/journal/tg/2019/11/08798880/1cumZbd4qNG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a608", "title": "Walking and Teleportation in Wide-area Virtual Reality Experiences", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a608/1pysv8bIfrG", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a627", "title": "The Cognitive Load and Usability of Three Walking Metaphors for Consumer Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a627/1pysyecdlzq", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800a209", "title": "Finding a range of perceived natural visual walking speed for stationary travelling techniques in VR", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a209/1yeQYNSYkSY", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1jIxhEnA8IE", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "acronym": "vrw", "groupId": "1836626", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1jIxwgIdgsw", "doi": "10.1109/VRW50115.2020.00100", "title": "Evaluation of Simulator Sickness for 360&#x00B0; Videos on an HMD Subject to Participants&#x2019; Experience with Virtual Reality", "normalizedTitle": "Evaluation of Simulator Sickness for 360° Videos on an HMD Subject to Participants’ Experience with Virtual Reality", "abstract": "Virtual reality (VR) has seen tremendous advances in head-mounted displays (HMDs), optics, media quality, and other improvements that facilitate immersive experiences. With the occurrence of new technologies like Cloud VR and networked VR video services, applications such as 360&#x00B0; video streaming are becoming more popular within the broader consumer markets. As a result, VR content is accessible to customers with rather different levels of experiences with immersive media, i.e., never, sometimes, or often use of VR. The question, therefore, arises to which degree simulator sickness is induced to viewers depending on their experiences with VR on HMDs. In this paper, simulator sickness is evaluated for 360&#x00B0; videos that were shown on an HTC Vive Pro HMD to participants having different levels of experience with VR on HMDs. The modified absolute category rating with hidden reference (M-ACR-HR) method was used in a subjective experiment for video quality assessment within two subsequent sessions along with a simulator sickness questionnaire (SSQ). 
A statistical analysis of the SSQ scores is performed to reveal the relationship between simulator sickness and participants&#x2019; experiences with VR regarding: (1) Individual symptoms, (2) Pairwise comparison of symptoms, and (3) Symptom clusters of nausea, oculomotor, disorientation, and total score. It is shown that the simulator sickness symptoms, in general, are slightly or rarely perceived across the different experience levels for the selected 360&#x00B0; videos. The results indicate that the reported simulator sickness increases in the second session for participants that never used VR on HMDs. Sufficiently long breaks between sessions should therefore be accounted for in the M-ACR-HR method to avoid that simulator sickness influences quality rating.", "abstracts": [ { "abstractType": "Regular", "content": "Virtual reality (VR) has seen tremendous advances in head-mounted displays (HMDs), optics, media quality, and other improvements that facilitate immersive experiences. With the occurrence of new technologies like Cloud VR and networked VR video services, applications such as 360&#x00B0; video streaming are becoming more popular within the broader consumer markets. As a result, VR content is accessible to customers with rather different levels of experiences with immersive media, i.e., never, sometimes, or often use of VR. The question, therefore, arises to which degree simulator sickness is induced to viewers depending on their experiences with VR on HMDs. In this paper, simulator sickness is evaluated for 360&#x00B0; videos that were shown on an HTC Vive Pro HMD to participants having different levels of experience with VR on HMDs. The modified absolute category rating with hidden reference (M-ACR-HR) method was used in a subjective experiment for video quality assessment within two subsequent sessions along with a simulator sickness questionnaire (SSQ). 
A statistical analysis of the SSQ scores is performed to reveal the relationship between simulator sickness and participants&#x2019; experiences with VR regarding: (1) Individual symptoms, (2) Pairwise comparison of symptoms, and (3) Symptom clusters of nausea, oculomotor, disorientation, and total score. It is shown that the simulator sickness symptoms, in general, are slightly or rarely perceived across the different experience levels for the selected 360&#x00B0; videos. The results indicate that the reported simulator sickness increases in the second session for participants that never used VR on HMDs. Sufficiently long breaks between sessions should therefore be accounted for in the M-ACR-HR method to avoid that simulator sickness influences quality rating.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Virtual reality (VR) has seen tremendous advances in head-mounted displays (HMDs), optics, media quality, and other improvements that facilitate immersive experiences. With the occurrence of new technologies like Cloud VR and networked VR video services, applications such as 360° video streaming are becoming more popular within the broader consumer markets. As a result, VR content is accessible to customers with rather different levels of experiences with immersive media, i.e., never, sometimes, or often use of VR. The question, therefore, arises to which degree simulator sickness is induced to viewers depending on their experiences with VR on HMDs. In this paper, simulator sickness is evaluated for 360° videos that were shown on an HTC Vive Pro HMD to participants having different levels of experience with VR on HMDs. The modified absolute category rating with hidden reference (M-ACR-HR) method was used in a subjective experiment for video quality assessment within two subsequent sessions along with a simulator sickness questionnaire (SSQ). 
A statistical analysis of the SSQ scores is performed to reveal the relationship between simulator sickness and participants’ experiences with VR regarding: (1) Individual symptoms, (2) Pairwise comparison of symptoms, and (3) Symptom clusters of nausea, oculomotor, disorientation, and total score. It is shown that the simulator sickness symptoms, in general, are slightly or rarely perceived across the different experience levels for the selected 360° videos. The results indicate that the reported simulator sickness increases in the second session for participants that never used VR on HMDs. Sufficiently long breaks between sessions should therefore be accounted for in the M-ACR-HR method to avoid that simulator sickness influences quality rating.", "fno": "09090490", "keywords": [ "Streaming Media", "Quality Assessment", "Visualization", "Virtual Reality", "Head Mounted Displays", "Immersive Experience", "Immersive Media", "360 X 00 B 0 Videos", "Subjective Experiments", "M ACR HR Method", "Simulator Sickness Questionnaire" ], "authors": [ { "affiliation": "Blekinge Institute of Technology,SE-37179,Karlskrona,Sweden", "fullName": "Majed Elwardy", "givenName": "Majed", "surname": "Elwardy", "__typename": "ArticleAuthorType" }, { "affiliation": "Blekinge Institute of Technology,SE-37179,Karlskrona,Sweden", "fullName": "Hans-Jürgen Zepernick", "givenName": "Hans-Jürgen", "surname": "Zepernick", "__typename": "ArticleAuthorType" }, { "affiliation": "Blekinge Institute of Technology,SE-37179,Karlskrona,Sweden", "fullName": "Yan Hu", "givenName": "Yan", "surname": "Hu", "__typename": "ArticleAuthorType" }, { "affiliation": "Blekinge Institute of Technology,SE-37179,Karlskrona,Sweden", "fullName": "Thi My Chinh Chu", "givenName": "Thi My Chinh", "surname": "Chu", "__typename": "ArticleAuthorType" }, { "affiliation": "Blekinge Institute of Technology,SE-37179,Karlskrona,Sweden", "fullName": "Veronica Sundstedt", "givenName": "Veronica", "surname": "Sundstedt", 
"__typename": "ArticleAuthorType" } ], "idPrefix": "vrw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-03-01T00:00:00", "pubType": "proceedings", "pages": "477-484", "year": "2020", "issn": null, "isbn": "978-1-7281-6532-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09090506", "articleId": "1jIxnV9dXZ6", "__typename": "AdjacentArticleType" }, "next": { "fno": "09090621", "articleId": "1jIxxY1owow", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar-adjunct/2018/7592/0/08699261", "title": "Visually Induced Motion Sickness in 360&#x00B0; Videos: Comparing and Combining Visual Optimization Techniques", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699261/19F1U8eRyMw", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a542", "title": "Resolution Tradeoff in Gameplay Experience, Performance, and Simulator Sickness in Virtual Reality Games", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a542/1CJcAVYrJew", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798291", "title": "Assessing Media QoE, Simulator Sickness and Presence for Omnidirectional Videos with Different Test Protocols", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798291/1cJ0GMB2sV2", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE 
Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798136", "title": "VR Sickness in Continuous Exposure to Live-action 180&#x00B0;Video", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798136/1cJ1gPJX2og", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090670", "title": "SiSiMo: Towards Simulator Sickness Modeling for 360<sup>&#x00B0;</sup> Videos Viewed with an HMD", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090670/1jIxwAw9Z9C", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090456", "title": "On the Effect of Standing and Seated Viewing of 360&#x00B0; Videos on Subjective Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090456/1jIxyayiDp6", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a198", "title": "Assessment of the Simulator Sickness Questionnaire for Omnidirectional Videos", "doi": null, "abstractUrl": "/proceedings-article/vr/2021/255600a198/1tuB40QFm92", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/icvrv/2020/0497/0/049700a042", "title": "Rating Duration Analysis for Subjective Quality Assessment of 360&#x00B0; Videos", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2020/049700a042/1vg7TpMdSH6", "parentPublication": { "id": "proceedings/icvrv/2020/0497/0", "title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800a176", "title": "Now I&#x2019;m Not Afraid: Reducing Fear of Missing Out in 360&#x00B0; Videos on a Head-Mounted Display using a Panoramic Thumbnail", "doi": null, "abstractUrl": "/proceedings-article/ismar/2021/015800a176/1yeCYYdBmPC", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800a183", "title": "Enabling Collaborative Interaction with 360&#x00B0; Panoramas between Large-scale Displays and Immersive Headsets", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a183/1yeQBWUxple", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1pBMeBWXAZ2", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "1810084", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1pBMji687lK", "doi": "10.1109/ISMAR-Adjunct51615.2020.00024", "title": "Concept for a Virtual Reality Robot Ground Simulator", "normalizedTitle": "Concept for a Virtual Reality Robot Ground Simulator", "abstract": "For many VR applications where natural walking is necessary, the problem of a far smaller real movement space than in VR arises. Treadmills and redirected walking are established methods for this issue. However, both are limited to even surfaces and are unable to simulate different ground properties. Here a concept for a VR robot ground simulator is presented allowing to walk on steep ground or even staircase and which can simulate different undergrounds like sand, grass, or concrete. Starting from gait parameters, the technical requirements and implementation challenges for the realization of such a VR ground simulator are given.", "abstracts": [ { "abstractType": "Regular", "content": "For many VR applications where natural walking is necessary, the problem of a far smaller real movement space than in VR arises. Treadmills and redirected walking are established methods for this issue. However, both are limited to even surfaces and are unable to simulate different ground properties. Here a concept for a VR robot ground simulator is presented allowing to walk on steep ground or even staircase and which can simulate different undergrounds like sand, grass, or concrete. 
Starting from gait parameters, the technical requirements and implementation challenges for the realization of such a VR ground simulator are given.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "For many VR applications where natural walking is necessary, the problem of a far smaller real movement space than in VR arises. Treadmills and redirected walking are established methods for this issue. However, both are limited to even surfaces and are unable to simulate different ground properties. Here a concept for a VR robot ground simulator is presented allowing to walk on steep ground or even staircase and which can simulate different undergrounds like sand, grass, or concrete. Starting from gait parameters, the technical requirements and implementation challenges for the realization of such a VR ground simulator are given.", "fno": "767500a036", "keywords": [ "Computer Simulation", "Mobile Robots", "Virtual Reality", "Gait Parameters", "Treadmills", "Steep Ground", "VR Robot Ground Simulator", "Redirected Walking", "Natural Walking", "VR Applications", "Virtual Reality Robot Ground Simulator", "Legged Locomotion", "Technical Requirements", "Solid Modeling", "Computational Modeling", "Robots", "Augmented Reality", "Virtual Reality", "Robotics", "Walking" ], "authors": [ { "affiliation": "Chemnitz University of Technology,Institute for Machine Tools and Production Processes", "fullName": "Mario Lorenz", "givenName": "Mario", "surname": "Lorenz", "__typename": "ArticleAuthorType" }, { "affiliation": "Chemnitz University of Technology,Institute for Machine Tools and Production Processes", "fullName": "Sebastian Knopp", "givenName": "Sebastian", "surname": "Knopp", "__typename": "ArticleAuthorType" }, { "affiliation": "Chemnitz University of Technology,Institute for Machine Tools and Production Processes", "fullName": "Philipp Klimant", "givenName": "Philipp", "surname": "Klimant", "__typename": "ArticleAuthorType" }, { "affiliation": "Chemnitz 
University of Technology,Institute for Machine Tools and Production Processes", "fullName": "Johannes Quellmalz", "givenName": "Johannes", "surname": "Quellmalz", "__typename": "ArticleAuthorType" }, { "affiliation": "Chemnitz University of Technology,Institute for Machine Tools and Production Processes", "fullName": "Holger Schlegel", "givenName": "Holger", "surname": "Schlegel", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-11-01T00:00:00", "pubType": "proceedings", "pages": "36-38", "year": "2020", "issn": null, "isbn": "978-1-7281-7675-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "767500a030", "articleId": "1pBMhi6PsIw", "__typename": "AdjacentArticleType" }, "next": { "fno": "767500a039", "articleId": "1pBMfjaOy08", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2009/3943/0/04811037", "title": "Real Walking Increases Simulator Sickness in Navigationally Complex Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2009/04811037/12OmNAoDilQ", "parentPublication": { "id": "proceedings/vr/2009/3943/0", "title": "2009 IEEE Virtual Reality Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2000/0478/0/04780217", "title": "Development of Ground Surface Simulator for Tel-E-Merge System", "doi": null, "abstractUrl": "/proceedings-article/vr/2000/04780217/12OmNC1Y5qm", "parentPublication": { "id": "proceedings/vr/2000/0478/0", "title": "Virtual Reality Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a732", "title": "Stay Safe&#x0021; Safety Precautions for Walking on a Conventional Treadmill in VR", 
"doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a732/1CJcCMpD8xa", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a847", "title": "Auditory Feedback to Make Walking in Virtual Reality More Accessible", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a847/1JrR8Ihk9Tq", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a631", "title": "Gait Differences in the Real World and Virtual Reality: The Effect of Prior Virtual Reality Experience", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a631/1JrRaogbK6I", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798095", "title": "Distance Judgments to On- and Off-Ground Objects in Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798095/1cJ0Yxz6rrG", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797842", "title": "A pilot study of gaze-gait relations analysis in a VR environment using HMD and LRF", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797842/1cJ15kwNxnO", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on 
Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a608", "title": "Walking and Teleportation in Wide-area Virtual Reality Experiences", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a608/1pysv8bIfrG", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a627", "title": "The Cognitive Load and Usability of Three Walking Metaphors for Consumer Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a627/1pysyecdlzq", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a607", "title": "Virtual Walking Generator from Omnidirectional Video with Ground-dependent Foot Vibrations", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a607/1tnWZe0CPwA", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1CJbEwHHqEg", "title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1CJcsYYBYJi", "doi": "10.1109/VR51125.2022.00057", "title": "Effects of Virtual Room Size and Objects on Relative Translation Gain Thresholds in Redirected Walking", "normalizedTitle": "Effects of Virtual Room Size and Objects on Relative Translation Gain Thresholds in Redirected Walking", "abstract": "This paper investigates how the size of virtual space and objects within it affect the threshold range of relative translation gains, a Redirected Walking (RDW) technique that scales the user&#x2019;s movement in virtual space in different ratios for the width and depth. While previous studies assert that a virtual room&#x2019;s size affects relative translation gain thresholds on account of the virtual horizon&#x2019;s location, additional research is needed to explore this assumption through a structured approach to visual perception in Virtual Reality (VR). We estimate the relative translation gain thresholds in six spatial conditions configured by three room sizes and the presence of virtual objects (3 &#x00D7; 2), which were set according to differing Angles of Declination (AoDs) between eye-gaze and the forward-gaze. Results show that both size and virtual objects significantly affect the threshold range, it being greater in the large-sized condition and furnished condition. This indicates that the effect of relative translation gains can be further increased by constructing a perceived virtual movable space that is even larger than the adjusted virtual movable space and placing objects in it. 
Our study can be applied to adjust virtual spaces in synchronizing heterogeneous spaces without coordinate distortion where real and virtual objects can be leveraged to create realistic mutual spaces.", "abstracts": [ { "abstractType": "Regular", "content": "This paper investigates how the size of virtual space and objects within it affect the threshold range of relative translation gains, a Redirected Walking (RDW) technique that scales the user&#x2019;s movement in virtual space in different ratios for the width and depth. While previous studies assert that a virtual room&#x2019;s size affects relative translation gain thresholds on account of the virtual horizon&#x2019;s location, additional research is needed to explore this assumption through a structured approach to visual perception in Virtual Reality (VR). We estimate the relative translation gain thresholds in six spatial conditions configured by three room sizes and the presence of virtual objects (3 &#x00D7; 2), which were set according to differing Angles of Declination (AoDs) between eye-gaze and the forward-gaze. Results show that both size and virtual objects significantly affect the threshold range, it being greater in the large-sized condition and furnished condition. This indicates that the effect of relative translation gains can be further increased by constructing a perceived virtual movable space that is even larger than the adjusted virtual movable space and placing objects in it. 
Our study can be applied to adjust virtual spaces in synchronizing heterogeneous spaces without coordinate distortion where real and virtual objects can be leveraged to create realistic mutual spaces.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper investigates how the size of virtual space and objects within it affect the threshold range of relative translation gains, a Redirected Walking (RDW) technique that scales the user’s movement in virtual space in different ratios for the width and depth. While previous studies assert that a virtual room’s size affects relative translation gain thresholds on account of the virtual horizon’s location, additional research is needed to explore this assumption through a structured approach to visual perception in Virtual Reality (VR). We estimate the relative translation gain thresholds in six spatial conditions configured by three room sizes and the presence of virtual objects (3 × 2), which were set according to differing Angles of Declination (AoDs) between eye-gaze and the forward-gaze. Results show that both size and virtual objects significantly affect the threshold range, it being greater in the large-sized condition and furnished condition. This indicates that the effect of relative translation gains can be further increased by constructing a perceived virtual movable space that is even larger than the adjusted virtual movable space and placing objects in it. 
Our study can be applied to adjust virtual spaces in synchronizing heterogeneous spaces without coordinate distortion where real and virtual objects can be leveraged to create realistic mutual spaces.", "fno": "961700a379", "keywords": [ "Gaze Tracking", "Hazardous Areas", "Virtual Reality", "Visual Perception", "Virtual Objects", "Relative Translation Gain Thresholds", "Virtual Space", "Threshold Range", "Relative Translation Gains", "Redirected Walking Technique", "Virtual Horizon", "Virtual Reality", "Room Sizes", "Large Sized Condition", "Perceived Virtual Movable Space", "Adjusted Virtual Movable Space", "Placing Objects", "Virtual Room Size", "Ao Ds", "Eye Gaze", "Forward Gaze", "Legged Locomotion", "Three Dimensional Displays", "Conferences", "Virtual Reality", "User Interfaces", "Distortion", "Visual Perception", "Virtual Reality", "Relative Translation Gains", "Threshold", "Redirected Walking", "Angle Of Declination", "Virtual Object" ], "authors": [ { "affiliation": "KAIST UVR Lab.", "fullName": "Dooyoung Kim", "givenName": "Dooyoung", "surname": "Kim", "__typename": "ArticleAuthorType" }, { "affiliation": "KAIST Visual Cognition Lab.", "fullName": "Jinwook Kim", "givenName": "Jinwook", "surname": "Kim", "__typename": "ArticleAuthorType" }, { "affiliation": "KAIST UVR Lab.", "fullName": "Jae-Eun Shin", "givenName": "Jae-Eun", "surname": "Shin", "__typename": "ArticleAuthorType" }, { "affiliation": "KAIST UVR Lab.", "fullName": "Boram Yoon", "givenName": "Boram", "surname": "Yoon", "__typename": "ArticleAuthorType" }, { "affiliation": "KAIST Visual Cognition Lab.", "fullName": "Jeongmi Lee", "givenName": "Jeongmi", "surname": "Lee", "__typename": "ArticleAuthorType" }, { "affiliation": "KAIST UVR Lab.", "fullName": "Woontack Woo", "givenName": "Woontack", "surname": "Woo", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-03-01T00:00:00", 
"pubType": "proceedings", "pages": "379-388", "year": "2022", "issn": null, "isbn": "978-1-6654-9617-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "961700a370", "articleId": "1CJcaEUfrW0", "__typename": "AdjacentArticleType" }, "next": { "fno": "961700a389", "articleId": "1CJbVF427gQ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2017/6647/0/07892373", "title": "Application of redirected walking in room-scale VR", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892373/12OmNxG1ySA", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504752", "title": "Disguising rotational gain for redirected walking in virtual reality: Effect of visual density", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504752/12OmNyr8YkS", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2013/4795/0/06549412", "title": "Estimation of detection thresholds for acoustic based redirected walking techniques", "doi": null, "abstractUrl": "/proceedings-article/vr/2013/06549412/12OmNz2C1yn", "parentPublication": { "id": "proceedings/vr/2013/4795/0", "title": "2013 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446479", "title": "Adopting the Roll Manipulation for Redirected Walking", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446479/13bd1eSlys4", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446225", "title": "Effect of Environment Size on Curvature Redirected Walking Thresholds", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446225/13bd1sx4Zt8", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08645699", "title": "Shrinking Circles: Adaptation to Increased Curvature Gain in Redirected Walking", "doi": null, "abstractUrl": "/journal/tg/2019/05/08645699/17PYElBjW00", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09785918", "title": "Redirected Walking for Exploring Immersive Virtual Spaces with HMD: A Comprehensive Review and Recent Advances", "doi": null, "abstractUrl": "/journal/tg/5555/01/09785918/1DPaEdHg6KQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798117", "title": "Estimation of Rotation Gain Thresholds for Redirected Walking Considering FOV and Gender", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798117/1cJ1fo5PwqY", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ec/2022/02/09364750", "title": "Multi-Technique Redirected Walking Method", "doi": null, "abstractUrl": "/journal/ec/2022/02/09364750/1rxdpzgvsxG", "parentPublication": { "id": "trans/ec", "title": "IEEE Transactions on 
Emerging Topics in Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a653", "title": "Adjusting Relative Translation Gains According to Space Size in Redirected Walking for Mixed Reality Mutual Space Generation", "doi": null, "abstractUrl": "/proceedings-article/vr/2021/255600a653/1tuANZ6Iz3q", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1gFJctW2S76", "title": "2019 IEEE International Symposium on Multimedia (ISM)", "acronym": "ism", "groupId": "1001094", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1gFJcMPvpVm", "doi": "10.1109/ISM46123.2019.00020", "title": "Versatile Video Coding of 360-Degree Video using Frame-Based FoV and Visual Attention", "normalizedTitle": "Versatile Video Coding of 360-Degree Video using Frame-Based FoV and Visual Attention", "abstract": "High quality omnidirectional video requires ultra high resolution formats encoded with very high bit rates to guarantee acceptable QoE in video delivery services. Since in general the full FoV, i.e., 360&#x00B0;, is not required at once by users, rather than agnostic encoding of the whole 360&#x00B0; video, this work proposes a flexible coding approach where the full FoV is mapped into video frames and efficiently encoded using intra-FoV prediction. By avoiding inter-FoV prediction, the proposed approach enables independent decoding of one or more FoVs extracted from a single compressed stream containing the full FoV video. To achieve improved quality in those FoVs which attract more visual attention, non-uniform coding is proposed for the new Versatile Video Coding standard (VVC), using perceptually-driven quantisation for each FoV. This strategy, makes use of visual attention maps to decrease the overall bit rate without compromising the quality of the most relevant regions. The simulation results show that the proposed coding mechanism achieves consistent quality gains in the relevant FoV without significant losses in the remaining ones. 
In comparison with the reference VVC, the proposed method is able to achieve average quality gains up to 1.56 dB and to efficiently adapt the coding parameters to the visual attention information.", "abstracts": [ { "abstractType": "Regular", "content": "High quality omnidirectional video requires ultra high resolution formats encoded with very high bit rates to guarantee acceptable QoE in video delivery services. Since in general the full FoV, i.e., 360&#x00B0;, is not required at once by users, rather than agnostic encoding of the whole 360&#x00B0; video, this work proposes a flexible coding approach where the full FoV is mapped into video frames and efficiently encoded using intra-FoV prediction. By avoiding inter-FoV prediction, the proposed approach enables independent decoding of one or more FoVs extracted from a single compressed stream containing the full FoV video. To achieve improved quality in those FoVs which attract more visual attention, non-uniform coding is proposed for the new Versatile Video Coding standard (VVC), using perceptually-driven quantisation for each FoV. This strategy, makes use of visual attention maps to decrease the overall bit rate without compromising the quality of the most relevant regions. The simulation results show that the proposed coding mechanism achieves consistent quality gains in the relevant FoV without significant losses in the remaining ones. In comparison with the reference VVC, the proposed method is able to achieve average quality gains up to 1.56 dB and to efficiently adapt the coding parameters to the visual attention information.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "High quality omnidirectional video requires ultra high resolution formats encoded with very high bit rates to guarantee acceptable QoE in video delivery services. 
Since in general the full FoV, i.e., 360°, is not required at once by users, rather than agnostic encoding of the whole 360° video, this work proposes a flexible coding approach where the full FoV is mapped into video frames and efficiently encoded using intra-FoV prediction. By avoiding inter-FoV prediction, the proposed approach enables independent decoding of one or more FoVs extracted from a single compressed stream containing the full FoV video. To achieve improved quality in those FoVs which attract more visual attention, non-uniform coding is proposed for the new Versatile Video Coding standard (VVC), using perceptually-driven quantisation for each FoV. This strategy, makes use of visual attention maps to decrease the overall bit rate without compromising the quality of the most relevant regions. The simulation results show that the proposed coding mechanism achieves consistent quality gains in the relevant FoV without significant losses in the remaining ones. In comparison with the reference VVC, the proposed method is able to achieve average quality gains up to 1.56 dB and to efficiently adapt the coding parameters to the visual attention information.", "fno": "560600a080", "keywords": [ "Data Compression", "Video Coding", "Video Streaming", "High Quality Omnidirectional Video", "Ultra High Resolution Formats", "High Bit Rates", "Video Delivery Services", "Agnostic Encoding", "Flexible Coding Approach", "Video Frames", "Intra Fo V Prediction", "Inter Fo V Prediction", "Fo V Video", "Nonuniform Coding", "Versatile Video Coding Standard", "Visual Attention Maps", "Coding Mechanism", "Visual Attention Information", "360 Degree Video", "Frame Based Fo V", "Omnidirectional Video Coding Independent Fo V Decoding Visual Attention Based Coding" ], "authors": [ { "affiliation": "Instituto de Telecomunicações", "fullName": "J. 
Carreira", "givenName": "J.", "surname": "Carreira", "__typename": "ArticleAuthorType" }, { "affiliation": "Instituto de Telecomunicações, Instituto Politécnico de Leiria", "fullName": "Sergio M. M. de Faria", "givenName": "Sergio M. M.", "surname": "de Faria", "__typename": "ArticleAuthorType" }, { "affiliation": "Instituto Politécnico de Leiria", "fullName": "Luis M. N. Tavora", "givenName": "Luis M. N.", "surname": "Tavora", "__typename": "ArticleAuthorType" }, { "affiliation": "Instituto de Telecomunicações, Universidade de Aveiro", "fullName": "António Navarro", "givenName": "António", "surname": "Navarro", "__typename": "ArticleAuthorType" }, { "affiliation": "Instituto de Telecomunicações, Instituto Politécnico de Leiria", "fullName": "Pedro A. A. Assunção", "givenName": "Pedro A. A.", "surname": "Assunção", "__typename": "ArticleAuthorType" } ], "idPrefix": "ism", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-12-01T00:00:00", "pubType": "proceedings", "pages": "80-805", "year": "2019", "issn": null, "isbn": "978-1-7281-5606-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "560600a072", "articleId": "1gFJdf4ARlm", "__typename": "AdjacentArticleType" }, "next": { "fno": "560600a086", "articleId": "1gFJgByGiBi", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iscc/2010/7754/0/05546817", "title": "Low-complexity aggregation of collected images with correlated fields of view in wireless video sensor networks", "doi": null, "abstractUrl": "/proceedings-article/iscc/2010/05546817/12OmNvFHfD3", "parentPublication": { "id": "proceedings/iscc/2010/7754/0", "title": "The IEEE symposium on Computers and Communications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mipr/2019/1198/0/119800a455", 
"title": "Analysis of Palette Mode on Versatile Video Coding", "doi": null, "abstractUrl": "/proceedings-article/mipr/2019/119800a455/19wB4RGoq7C", "parentPublication": { "id": "proceedings/mipr/2019/1198/0", "title": "2019 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mipr/2019/1198/0/119800a026", "title": "Current Picture Referencing in Versatile Video Coding", "doi": null, "abstractUrl": "/proceedings-article/mipr/2019/119800a026/19wB5ULVAmQ", "parentPublication": { "id": "proceedings/mipr/2019/1198/0", "title": "2019 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mipr/2019/1198/0/119800a297", "title": "Very Long Term Field of View Prediction for 360-Degree Video Streaming", "doi": null, "abstractUrl": "/proceedings-article/mipr/2019/119800a297/19wB5oa2ORi", "parentPublication": { "id": "proceedings/mipr/2019/1198/0", "title": "2019 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2022/6497/0/649700a531", "title": "VRFormer: 360-Degree Video Streaming with FoV Combined Prediction and Super resolution", "doi": null, "abstractUrl": "/proceedings-article/ispa-bdcloud-socialcom-sustaincom/2022/649700a531/1LKwldiRY40", "parentPublication": { "id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2022/6497/0", "title": "2022 IEEE Intl Conf on Parallel & Distributed Processing with Applications, Big Data & Cloud Computing, Sustainable Computing & Communications, Social Computing & Networking (ISPA/BDCloud/SocialCom/SustainCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dsd/2019/2862/0/286200a194", 
"title": "An Efficient FPGA Implementation of Versatile Video Coding Intra Prediction", "doi": null, "abstractUrl": "/proceedings-article/dsd/2019/286200a194/1ehBOGe6EZG", "parentPublication": { "id": "proceedings/dsd/2019/2862/0", "title": "2019 22nd Euromicro Conference on Digital System Design (DSD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/2022/07/09261971", "title": "Online Bitrate Selection for Viewport Adaptive 360-Degree Video Streaming", "doi": null, "abstractUrl": "/journal/tm/2022/07/09261971/1oPzPzmWa9W", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2020/8697/0/869700a085", "title": "On Subpicture-based Viewport-dependent 360-degree Video Streaming using VVC", "doi": null, "abstractUrl": "/proceedings-article/ism/2020/869700a085/1qBbHaCz3vG", "parentPublication": { "id": "proceedings/ism/2020/8697/0", "title": "2020 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/msn/2020/9916/0/991600a291", "title": "MEC-Assisted FoV-Aware and QoE-Driven Adaptive 360&#x00B0; Video Streaming for Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/msn/2020/991600a291/1sBO3kw7jnq", "parentPublication": { "id": "proceedings/msn/2020/9916/0", "title": "2020 16th International Conference on Mobility, Sensing and Networking (MSN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2021/4899/0/489900b861", "title": "Deep Learning based Spatial-Temporal In-loop filtering for Versatile Video Coding", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2021/489900b861/1yVA358Wsfu", "parentPublication": { "id": "proceedings/cvprw/2021/4899/0", "title": "2021 IEEE/CVF Conference on Computer Vision and 
Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1kwqyDCYmas", "title": "2020 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "acronym": "icmew", "groupId": "1801805", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1kwqECCHmus", "doi": "10.1109/ICMEW46912.2020.9106059", "title": "Field-Of-View Effect on The Perceived Quality of Omnidirectional Images", "normalizedTitle": "Field-Of-View Effect on The Perceived Quality of Omnidirectional Images", "abstract": "To visualize omnidirectional (or 360&#x00B0;) visual content, the rectilinear perspective projection is commonly employed to map the visual information from the observed area (or viewport) of the viewing sphere to a plane, resulting in the viewport image that is shown to the user. To enhance the user sense of immersion and engagement when exploring the 360&#x00B0; visual content, the viewport field of view (FoV) should be large; however, large FoVs introduce geometric distortions (e.g., objects stretching and shearing); thus, the FoV has an important role on the user&#x2019;s quality of experience (QoE). In this paper, a subjective test campaign was conducted, aiming to assess the FoV effect on perceived quality and to find the FoV that presents the best trade-off between user immersive experience and perceived geometric distortions. The analysis of the subjective test results shows that a FoV close to 110&#x00B0; has the highest preference among the observers who participated in the test.", "abstracts": [ { "abstractType": "Regular", "content": "To visualize omnidirectional (or 360&#x00B0;) visual content, the rectilinear perspective projection is commonly employed to map the visual information from the observed area (or viewport) of the viewing sphere to a plane, resulting in the viewport image that is shown to the user. 
To enhance the user sense of immersion and engagement when exploring the 360&#x00B0; visual content, the viewport field of view (FoV) should be large; however, large FoVs introduce geometric distortions (e.g., objects stretching and shearing); thus, the FoV has an important role on the user&#x2019;s quality of experience (QoE). In this paper, a subjective test campaign was conducted, aiming to assess the FoV effect on perceived quality and to find the FoV that presents the best trade-off between user immersive experience and perceived geometric distortions. The analysis of the subjective test results shows that a FoV close to 110&#x00B0; has the highest preference among the observers who participated in the test.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "To visualize omnidirectional (or 360°) visual content, the rectilinear perspective projection is commonly employed to map the visual information from the observed area (or viewport) of the viewing sphere to a plane, resulting in the viewport image that is shown to the user. To enhance the user sense of immersion and engagement when exploring the 360° visual content, the viewport field of view (FoV) should be large; however, large FoVs introduce geometric distortions (e.g., objects stretching and shearing); thus, the FoV has an important role on the user’s quality of experience (QoE). In this paper, a subjective test campaign was conducted, aiming to assess the FoV effect on perceived quality and to find the FoV that presents the best trade-off between user immersive experience and perceived geometric distortions. 
The analysis of the subjective test results shows that a FoV close to 110° has the highest preference among the observers who participated in the test.", "fno": "09106059", "keywords": [ "Image Sensors", "Image Texture", "Rendering Computer Graphics", "User Interfaces", "Virtual Reality", "Perceived Quality", "Omnidirectional Images", "Visual Content", "Rectilinear Perspective Projection", "Visual Information", "Observed Area", "Viewing Sphere", "Viewport Image", "User Sense", "Viewport Field", "Geometric Distortions", "Fo V Effect", "Subjective Test", "Field Of View Effect", "Quality Of Experience", "Qo E", "Visualization", "Conferences", "Immersive Experience", "Observers", "Distortion", "Quality Of Experience", "Shearing", "Omnidirectional Images", "Virtual Reality", "360 X 00 B 0 Image", "Quality Assessment", "Field Of View" ], "authors": [ { "affiliation": "Universidade de Lisboa - Instituto de Telecomunicações,Instituto Superior Técnico,Lisboa,Portugal", "fullName": "Falah Jabar", "givenName": "Falah", "surname": "Jabar", "__typename": "ArticleAuthorType" }, { "affiliation": "Universidade de Lisboa - Instituto de Telecomunicações,Instituto Superior Técnico,Lisboa,Portugal", "fullName": "Joño Ascenso", "givenName": "Joño", "surname": "Ascenso", "__typename": "ArticleAuthorType" }, { "affiliation": "Universidade de Lisboa - Instituto de Telecomunicações,Instituto Superior Técnico,Lisboa,Portugal", "fullName": "Maria Paula Queluz", "givenName": "Maria Paula", "surname": "Queluz", "__typename": "ArticleAuthorType" } ], "idPrefix": "icmew", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-07-01T00:00:00", "pubType": "proceedings", "pages": "1-6", "year": "2020", "issn": null, "isbn": "978-1-7281-1485-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09106038", "articleId": "1kwqKWSV808", "__typename": "AdjacentArticleType" }, 
"next": { "fno": "09105977", "articleId": "1kwqALfskDe", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ism/2018/6857/0/685700a089", "title": "HTTP/2-Based Streaming Solutions for Tiled Omnidirectional Videos", "doi": null, "abstractUrl": "/proceedings-article/ism/2018/685700a089/17D45We0UCp", "parentPublication": { "id": "proceedings/ism/2018/6857/0", "title": "2018 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2017/2636/0/263600a421", "title": "View-Dependent Omnidirectional Video Encapsulation Using Multiple Tracks", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2017/263600a421/1ap5yq7z67u", "parentPublication": { "id": "proceedings/icvrv/2017/2636/0", "title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093582", "title": "360 Panorama Synthesis from a Sparse Set of Images with Unknown Field of View", "doi": null, "abstractUrl": "/proceedings-article/wacv/2020/09093582/1jPbrcnBX8s", "parentPublication": { "id": "proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/04/09212608", "title": "Viewport-Based CNN: A Multi-Task Approach for Assessing 360&#x00B0; Video Quality", "doi": null, "abstractUrl": "/journal/tp/2022/04/09212608/1nG8VYgj7Ik", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2020/8697/0/869700a085", "title": "On Subpicture-based Viewport-dependent 360-degree Video Streaming 
using VVC", "doi": null, "abstractUrl": "/proceedings-article/ism/2020/869700a085/1qBbHaCz3vG", "parentPublication": { "id": "proceedings/ism/2020/8697/0", "title": "2020 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2020/8697/0/869700a082", "title": "Redefine the A in ABR for 360-degree Videos: A Flexible ABR Framework", "doi": null, "abstractUrl": "/proceedings-article/ism/2020/869700a082/1qBbIEON8UU", "parentPublication": { "id": "proceedings/ism/2020/8697/0", "title": "2020 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/msn/2020/9916/0/991600a291", "title": "MEC-Assisted FoV-Aware and QoE-Driven Adaptive 360&#x00B0; Video Streaming for Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/msn/2020/991600a291/1sBO3kw7jnq", "parentPublication": { "id": "proceedings/msn/2020/9916/0", "title": "2020 16th International Conference on Mobility, Sensing and Networking (MSN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a510", "title": "The Effect of Camera Height on The User Experience of Mid-air 360&#x00B0; Videos", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a510/1tnXMvwgvmg", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a542", "title": "Field of View Effect on Distance Perception in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a542/1tnXQ9aew80", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User 
Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a198", "title": "Assessment of the Simulator Sickness Questionnaire for Omnidirectional Videos", "doi": null, "abstractUrl": "/proceedings-article/vr/2021/255600a198/1tuB40QFm92", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxV4itF", "title": "2017 IEEE Virtual Reality (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNznkKb4", "doi": "10.1109/VR.2017.7892302", "title": "Estimating the motion-to-photon latency in head mounted displays", "normalizedTitle": "Estimating the motion-to-photon latency in head mounted displays", "abstract": "We present a method for estimating the Motion-to-Photon (End-to-End) latency of head mounted displays (HMDs). The specific HMD evaluated in our study was the Oculus Rift DK2, but the procedure is general. We mounted the HMD on a pendulum to introduce damped sinusoidal motion to the HMD during the pendulum swing. The latency was estimated by calculating the phase shift between the captured signals of the physical motion of the HMD and a motion-dependent gradient stimulus rendered on the display. We used the proposed method to estimate both rotational and translational Motion-to-Photon latencies of the Oculus Rift DK2.", "abstracts": [ { "abstractType": "Regular", "content": "We present a method for estimating the Motion-to-Photon (End-to-End) latency of head mounted displays (HMDs). The specific HMD evaluated in our study was the Oculus Rift DK2, but the procedure is general. We mounted the HMD on a pendulum to introduce damped sinusoidal motion to the HMD during the pendulum swing. The latency was estimated by calculating the phase shift between the captured signals of the physical motion of the HMD and a motion-dependent gradient stimulus rendered on the display. We used the proposed method to estimate both rotational and translational Motion-to-Photon latencies of the Oculus Rift DK2.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a method for estimating the Motion-to-Photon (End-to-End) latency of head mounted displays (HMDs). 
The specific HMD evaluated in our study was the Oculus Rift DK2, but the procedure is general. We mounted the HMD on a pendulum to introduce damped sinusoidal motion to the HMD during the pendulum swing. The latency was estimated by calculating the phase shift between the captured signals of the physical motion of the HMD and a motion-dependent gradient stimulus rendered on the display. We used the proposed method to estimate both rotational and translational Motion-to-Photon latencies of the Oculus Rift DK2.", "fno": "07892302", "keywords": [ "Resists", "Potentiometers", "Photodiodes", "Virtual Reality", "Cameras", "Estimation", "Frequency Domain Analysis", "Motion To Photon Latency", "End To End Latency", "Head Mounted Displays" ], "authors": [ { "affiliation": "Department of Electrical Engineering and Computer Science, York University, Canada", "fullName": "Jingbo Zhao", "givenName": "Jingbo", "surname": "Zhao", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Electrical Engineering and Computer Science, York University, Canada", "fullName": "Robert S. 
Allison", "givenName": "Robert S.", "surname": "Allison", "__typename": "ArticleAuthorType" }, { "affiliation": "Flight Research Laboratory, National Research Council, Canada", "fullName": "Margarita Vinnikov", "givenName": "Margarita", "surname": "Vinnikov", "__typename": "ArticleAuthorType" }, { "affiliation": "Flight Research Laboratory, National Research Council, Canada", "fullName": "Sion Jennings", "givenName": "Sion", "surname": "Jennings", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-01-01T00:00:00", "pubType": "proceedings", "pages": "313-314", "year": "2017", "issn": "2375-5334", "isbn": "978-1-5090-6647-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07892301", "articleId": "12OmNx5GUce", "__typename": "AdjacentArticleType" }, "next": { "fno": "07892303", "articleId": "12OmNx7ouWn", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2017/6647/0/07892261", "title": "The AR-Rift 2 prototype", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892261/12OmNCcKQmq", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504722", "title": "Effect of head mounted display latency on human stability during quiescent standing on one foot", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504722/12OmNrJAdXj", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504724", "title": "Measurement of Head Mounted Display's latency in rotation and side 
effect caused by lag compensation by simultaneous observation — An example result using Oculus Rift DK2", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504724/12OmNvT2oJn", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504717", "title": "OST Rift: Temporally consistent augmented reality with a consumer optical see-through head-mounted display", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504717/12OmNzXFoKD", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446139", "title": "The Effect of Immersive Displays on Situation Awareness in Virtual Environments for Aerial Firefighting Air Attack Supervisor Training", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446139/13bd1AIBM1Q", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798320", "title": "Evaluating Dynamic Characteristics of Head Mounted Display in Parallel Movement with Simultaneous Subjective Observation Method", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798320/1cJ0TRvTuOk", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798369", "title": "Brain Activity in Virtual Reality: Assessing Signal Quality of High-Resolution EEG While Using Head-Mounted Displays", "doi": null, "abstractUrl": 
"/proceedings-article/vr/2019/08798369/1cJ18Pncw9y", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a058", "title": "New System to Measure Motion Motion-to-Photon Latency of Virtual Reality Head Mounted Display", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a058/1gyskZKBOtq", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090580", "title": "A Study on the Effects of Head Mounted Displays Movement and Image Movement on Virtual Reality Sickness", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090580/1jIxns5TwxG", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800a413", "title": "Selective Foveated Ray Tracing for Head-Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/ismar/2021/015800a413/1yeD8bFOZos", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1aDSuDp9DuU", "title": "2019 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)", "acronym": "percom-workshops", "groupId": "1000552", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1aDSEX2e3Ys", "doi": "10.1109/PERCOMW.2019.8730658", "title": "Anticipated Acceptance of Head Mounted Displays: a content analysis of YouTube comments", "normalizedTitle": "Anticipated Acceptance of Head Mounted Displays: a content analysis of YouTube comments", "abstract": "For further development of technologies but also for the implementation in real life contexts, it is important to understand users' perspectives on the anticipated use of innovative technologies in an early development phase. In addition, it is also important to get a better understanding of the explanation of this behavior towards technology use in later stages. Although Head Mounted Displays (HMDs) are not really new anymore, the uptake has been slow so far and people showed some extreme reactions. The objective of this study was to analyze the content of YouTube comments on videos of HMDs, in order to get a better understanding of relevant factors in this early phase of potential acceptance of HMDs. We analyzed 379 YouTube comments on HMDs using content analysis. Comments were divided into three groups: HMD, video, and miscellaneous. Comments about HMDs n=24 were further analyzed. Most of the commenters showed a positive attitude to HMDs. Within the positive attitude, the most expressed themes were comments about the type of use (gaming), positive evaluations (emotions, coolness) and perceived need for an HMD. Within the negative attitudes, negative evaluations (judgments, emotions) were showed most and negative comparisons to other products were made. In neutral attitudes, the main theme was the type of use (gaming). 
The results specify a couple of user needs and social norms and values which people attach in this early phase to HMDs. In this early phase of acceptance, some early adoption observations were found as in when someone talks about the type of use (felt needs) and positive judgments (social norms). Early signs of rejection were found by negative judgments (social norms) and comparisons with other products (previous practice).", "abstracts": [ { "abstractType": "Regular", "content": "For further development of technologies but also for the implementation in real life contexts, it is important to understand users' perspectives on the anticipated use of innovative technologies in an early development phase. In addition, it is also important to get a better understanding of the explanation of this behavior towards technology use in later stages. Although Head Mounted Displays (HMDs) are not really new anymore, the uptake has been slow so far and people showed some extreme reactions. The objective of this study was to analyze the content of YouTube comments on videos of HMDs, in order to get a better understanding of relevant factors in this early phase of potential acceptance of HMDs. We analyzed 379 YouTube comments on HMDs using content analysis. Comments were divided into three groups: HMD, video, and miscellaneous. Comments about HMDs n=24 were further analyzed. Most of the commenters showed a positive attitude to HMDs. Within the positive attitude, the most expressed themes were comments about the type of use (gaming), positive evaluations (emotions, coolness) and perceived need for an HMD. Within the negative attitudes, negative evaluations (judgments, emotions) were showed most and negative comparisons to other products were made. In neutral attitudes, the main theme was the type of use (gaming). The results specify a couple of user needs and social norms and values which people attach in this early phase to HMDs. 
In this early phase of acceptance, some early adoption observations were found as in when someone talks about the type of use (felt needs) and positive judgments (social norms). Early signs of rejection were found by negative judgments (social norms) and comparisons with other products (previous practice).", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "For further development of technologies but also for the implementation in real life contexts, it is important to understand users' perspectives on the anticipated use of innovative technologies in an early development phase. In addition, it is also important to get a better understanding of the explanation of this behavior towards technology use in later stages. Although Head Mounted Displays (HMDs) are not really new anymore, the uptake has been slow so far and people showed some extreme reactions. The objective of this study was to analyze the content of YouTube comments on videos of HMDs, in order to get a better understanding of relevant factors in this early phase of potential acceptance of HMDs. We analyzed 379 YouTube comments on HMDs using content analysis. Comments were divided into three groups: HMD, video, and miscellaneous. Comments about HMDs n=24 were further analyzed. Most of the commenters showed a positive attitude to HMDs. Within the positive attitude, the most expressed themes were comments about the type of use (gaming), positive evaluations (emotions, coolness) and perceived need for an HMD. Within the negative attitudes, negative evaluations (judgments, emotions) were showed most and negative comparisons to other products were made. In neutral attitudes, the main theme was the type of use (gaming). The results specify a couple of user needs and social norms and values which people attach in this early phase to HMDs. 
In this early phase of acceptance, some early adoption observations were found as in when someone talks about the type of use (felt needs) and positive judgments (social norms). Early signs of rejection were found by negative judgments (social norms) and comparisons with other products (previous practice).", "fno": "08730658", "keywords": [ "Helmet Mounted Displays", "Social Networking Online", "Technology Acceptance Model", "Text Analysis", "Anticipated Acceptance", "Content Analysis", "Head Mounted Displays", "You Tube Comments", "User Perspectives", "HMD Acceptance", "Technology Acceptance", "Videos", "Glass", "Google", "You Tube", "Resists", "Augmented Reality", "Biomedical Imaging", "Technology Adoption", "Technology Acceptance", "Head Mounted Displays", "Content Analysis" ], "authors": [ { "affiliation": "Saxion University of Applied Sciences, Enschede, The Netherlands", "fullName": "Niek Zuidhof", "givenName": "Niek", "surname": "Zuidhof", "__typename": "ArticleAuthorType" }, { "affiliation": "Amsterdam University of Applied Sciences, Amsterdam, The Netherlands", "fullName": "Somaya Ben Allouch", "givenName": "Somaya Ben", "surname": "Allouch", "__typename": "ArticleAuthorType" }, { "affiliation": "Newcom Research & Consultancy, Enschede, The Netherlands", "fullName": "Oscar Peters", "givenName": "Oscar", "surname": "Peters", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Philosophy, University of Twente, Enschede, The Netherlands", "fullName": "Peter-Paul Verbeek", "givenName": "Peter-Paul", "surname": "Verbeek", "__typename": "ArticleAuthorType" } ], "idPrefix": "percom-workshops", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-03-01T00:00:00", "pubType": "proceedings", "pages": "399-402", "year": "2019", "issn": null, "isbn": "978-1-5386-9151-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": 
"08730689", "articleId": "1aDSzO4ueVG", "__typename": "AdjacentArticleType" }, "next": { "fno": "08730711", "articleId": "1aDSBQt5TYQ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2014/6184/0/06948513", "title": "Google glass, The META and Co. How to calibrate optical see-through head mounted displays", "doi": null, "abstractUrl": "/proceedings-article/ismar/2014/06948513/12OmNB8TUim", "parentPublication": { "id": "proceedings/ismar/2014/6184/0", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504733", "title": "A low-cost, low-latency approach to dynamic immersion in occlusive head-mounted displays", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504733/12OmNBEGYLT", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sbesc/2016/2653/0/2653a024", "title": "Towards a Low-Cost Augmented Reality Head-Mounted Display with Real-Time Eye Center Location Capability", "doi": null, "abstractUrl": "/proceedings-article/sbesc/2016/2653a024/12OmNzxgHyv", "parentPublication": { "id": "proceedings/sbesc/2016/2653/0", "title": "2016 VI Brazilian Symposium on Computing Systems Engineering (SBESC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446175", "title": "A User-Based Comparison of Two Augmented Reality Glasses", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446175/13bd1eTtWYo", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, 
{ "id": "proceedings/vr/2018/3365/0/08448289", "title": "Performance Envelopes of in-Air Direct and Smartwatch Indirect Control for Head-Mounted Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08448289/13bd1fZBGcE", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/09/08052554", "title": "A Survey of Calibration Methods for Optical See-Through Head-Mounted Displays", "doi": null, "abstractUrl": "/journal/tg/2018/09/08052554/13rRUILtJqY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09850416", "title": "Distance Perception in Virtual Reality: A Meta-Analysis of the Effect of Head-Mounted Display Characteristics", "doi": null, "abstractUrl": "/journal/tg/5555/01/09850416/1Fz4SPLVTMY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797852", "title": "Perception of Volumetric Characters&#x0027; Eye-Gaze Direction in Head-Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797852/1cJ0UskDCRa", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sitis/2019/5686/0/568600a672", "title": "Author Gender Identification from Arabic Youtube Comments", "doi": null, "abstractUrl": "/proceedings-article/sitis/2019/568600a672/1j9xCIXohdC", "parentPublication": { "id": "proceedings/sitis/2019/5686/0", "title": "2019 
15th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icict/2020/7283/0/728300a331", "title": "Conspiracy and Rumor Correction: Analysis of Social Media Users' Comments", "doi": null, "abstractUrl": "/proceedings-article/icict/2020/728300a331/1jPb3vSHjKE", "parentPublication": { "id": "proceedings/icict/2020/7283/0", "title": "2020 3rd International Conference on Information and Computer Technologies (ICICT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cI6akLvAuQ", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cJ1a9pQj3G", "doi": "10.1109/VR.2019.8797921", "title": "TacTiles: Dual-Mode Low-Power Electromagnetic Actuators for Rendering Continuous Contact and Spatial Haptic Patterns in VR", "normalizedTitle": "TacTiles: Dual-Mode Low-Power Electromagnetic Actuators for Rendering Continuous Contact and Spatial Haptic Patterns in VR", "abstract": "We introduce TacTiles, light (1.8g), low-power (130 mW), and small form-factor (1 cm<sup>3</sup>) electromagnetic actuators that can form a flexible haptic array to provide localized tactile feedback. Our novel hardware design uses a custom 8-layer PCB, dampening materials, and asymmetric latching, enabling two distinct modes of actuation: contact and pulse mode. We leverage these modes in Virtual Reality (VR) to render continuous contact with objects and the exploration of object surfaces and volumes with spatial haptic patterns. Results from a series of experiments show that users are able to localize feedback, discriminate between modes with high accuracy, and differentiate objects from haptic surfaces and volumes even without looking at them.", "abstracts": [ { "abstractType": "Regular", "content": "We introduce TacTiles, light (1.8g), low-power (130 mW), and small form-factor (1 cm<sup>3</sup>) electromagnetic actuators that can form a flexible haptic array to provide localized tactile feedback. Our novel hardware design uses a custom 8-layer PCB, dampening materials, and asymmetric latching, enabling two distinct modes of actuation: contact and pulse mode. We leverage these modes in Virtual Reality (VR) to render continuous contact with objects and the exploration of object surfaces and volumes with spatial haptic patterns. 
Results from a series of experiments show that users are able to localize feedback, discriminate between modes with high accuracy, and differentiate objects from haptic surfaces and volumes even without looking at them.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We introduce TacTiles, light (1.8g), low-power (130 mW), and small form-factor (1 cm3) electromagnetic actuators that can form a flexible haptic array to provide localized tactile feedback. Our novel hardware design uses a custom 8-layer PCB, dampening materials, and asymmetric latching, enabling two distinct modes of actuation: contact and pulse mode. We leverage these modes in Virtual Reality (VR) to render continuous contact with objects and the exploration of object surfaces and volumes with spatial haptic patterns. Results from a series of experiments show that users are able to localize feedback, discriminate between modes with high accuracy, and differentiate objects from haptic surfaces and volumes even without looking at them.", "fno": "08797921", "keywords": [ "Electromagnetic Actuators", "Feedback", "Haptic Interfaces", "Printed Circuits", "Tactile Sensors", "Virtual Reality", "Tac Tiles", "Dual Mode Low Power Electromagnetic Actuators", "Spatial Haptic Patterns", "VR", "Form Factor", "Flexible Haptic Array", "Localized Tactile Feedback", "Pulse Mode", "Object Surfaces", "Haptic Surfaces", "Hardware Design", "8 Layer PCB", "Mass 1 8 G", "Power 130 0 M W", "Actuators", "Haptic Interfaces", "Rendering Computer Graphics", "Skin", "Electromagnetics", "Force", "Tactile Sensors", "Human Centered Computing X 2014 Human Computer Interaction HCI X 2014 Interaction Devices X 2014 Haptic Devices", "Human Centered Computing X 2014 Human Computer Interaction HCI X 2014 Interaction Paradigms X 2014 Virtual Reality" ], "authors": [ { "affiliation": "ETH Zurich", "fullName": "Velko Vechev", "givenName": "Velko", "surname": "Vechev", "__typename": "ArticleAuthorType" }, { "affiliation": 
"ETH Zurich, EPFL", "fullName": "Juan Zarate", "givenName": "Juan", "surname": "Zarate", "__typename": "ArticleAuthorType" }, { "affiliation": "ETH Zurich", "fullName": "David Lindlbauer", "givenName": "David", "surname": "Lindlbauer", "__typename": "ArticleAuthorType" }, { "affiliation": "EPFL", "fullName": "Ronan Hinchet", "givenName": "Ronan", "surname": "Hinchet", "__typename": "ArticleAuthorType" }, { "affiliation": "EPFL", "fullName": "Herbert Shea", "givenName": "Herbert", "surname": "Shea", "__typename": "ArticleAuthorType" }, { "affiliation": "ETH Zurich", "fullName": "Otmar Hilliges", "givenName": "Otmar", "surname": "Hilliges", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-03-01T00:00:00", "pubType": "proceedings", "pages": "312-320", "year": "2019", "issn": null, "isbn": "978-1-7281-1377-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08798181", "articleId": "1cJ0KPnDSV2", "__typename": "AdjacentArticleType" }, "next": { "fno": "08798205", "articleId": "1cJ1bY8RJIc", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ieee-vis/2000/6478/0/64780019", "title": "Shock and Vortex Visualization Using a Combined Visual/Haptic Interface", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2000/64780019/12OmNCmGNWb", "parentPublication": { "id": "proceedings/ieee-vis/2000/6478/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/haptics/2008/2005/0/04479950", "title": "The Touch Thimble: Providing Fingertip Contact Feedback During Point-Force Haptic Interaction", "doi": null, "abstractUrl": "/proceedings-article/haptics/2008/04479950/12OmNvk7JWz", "parentPublication": { "id": 
"proceedings/haptics/2008/2005/0", "title": "IEEE Haptics Symposium 2008", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/gcis/2009/3571/4/3571d259", "title": "Contact Elements Prediction Based Haptic Rendering Method for Collaborative Virtual Assembly System", "doi": null, "abstractUrl": "/proceedings-article/gcis/2009/3571d259/12OmNwJybQW", "parentPublication": { "id": "proceedings/gcis/2009/3571/4", "title": "2009 WRI Global Congress on Intelligent Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/haptics/2010/6821/0/05444676", "title": "Haptic figure-ground differentation via a haptic glance", "doi": null, "abstractUrl": "/proceedings-article/haptics/2010/05444676/12OmNzTH0NS", "parentPublication": { "id": "proceedings/haptics/2010/6821/0", "title": "2010 IEEE Haptics Symposium (Formerly known as Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2008/01/tth2008010039", "title": "Six-DoF Haptic Rendering of Contact Between Geometrically Complex Reduced Deformable Models", "doi": null, "abstractUrl": "/journal/th/2008/01/tth2008010039/13rRUEgarBB", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2011/02/tth2011020111", "title": "Geodesic Spline Interface for Haptic Curve Rendering", "doi": null, "abstractUrl": "/journal/th/2011/02/tth2011020111/13rRUILtJr2", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2014/02/06642031", "title": "Spatial Asymmetry in Tactile Sensor Skin Deformation Aids Perception of Edge Orientation During Haptic Exploration", "doi": null, 
"abstractUrl": "/journal/th/2014/02/06642031/13rRUwjXZSm", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2011/02/tth2011020088", "title": "Exploration of Tactile Contact in a Haptic Display: Effects of Contact Velocity and Transient Vibrations", "doi": null, "abstractUrl": "/journal/th/2011/02/tth2011020088/13rRUxE04tK", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2017/03/07784835", "title": "A 3-RSR Haptic Wearable Device for Rendering Fingertip Contact Forces", "doi": null, "abstractUrl": "/journal/th/2017/03/07784835/13rRUxZ0o1H", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/smartiot/2020/6514/0/09191998", "title": "Dynamic and Accurate Force Feedback for Electromagnetic Haptic Display", "doi": null, "abstractUrl": "/proceedings-article/smartiot/2020/09191998/1n0Iu1Fpx1m", "parentPublication": { "id": "proceedings/smartiot/2020/6514/0", "title": "2020 IEEE International Conference on Smart Internet of Things (SmartIoT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1J7W6LmbCw0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "9973799", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1J7W6Tyzcty", "doi": "10.1109/ISMAR-Adjunct57072.2022.00054", "title": "Application of Participatory Design Methodology in AR: Developing Prototypes for Two Context Scenarios", "normalizedTitle": "Application of Participatory Design Methodology in AR: Developing Prototypes for Two Context Scenarios", "abstract": "Recent technological developments and projects are now becoming more multidisciplinary. However, in designing augmented reality training and support systems (ARTSS), researchers in this field may already hold biases that lean towards the de facto standard of what constitutes good ARTSS. These biases are not inherently harmful, but there exists the risk that these may not lead to answering the demands and goals of the intended end-user that are of completely different specializations. To address this risk, we suggest using Par-ticipatory Design (PD) as a methodology for creating and designing ARTSS. By putting the collaborators as co-designers in the authoring of augmented reality prototypes, we can verify that the final ARTSS meets the end-user&#x0027;s requirements.", "abstracts": [ { "abstractType": "Regular", "content": "Recent technological developments and projects are now becoming more multidisciplinary. However, in designing augmented reality training and support systems (ARTSS), researchers in this field may already hold biases that lean towards the de facto standard of what constitutes good ARTSS. These biases are not inherently harmful, but there exists the risk that these may not lead to answering the demands and goals of the intended end-user that are of completely different specializations. 
To address this risk, we suggest using Par-ticipatory Design (PD) as a methodology for creating and designing ARTSS. By putting the collaborators as co-designers in the authoring of augmented reality prototypes, we can verify that the final ARTSS meets the end-user&#x0027;s requirements.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Recent technological developments and projects are now becoming more multidisciplinary. However, in designing augmented reality training and support systems (ARTSS), researchers in this field may already hold biases that lean towards the de facto standard of what constitutes good ARTSS. These biases are not inherently harmful, but there exists the risk that these may not lead to answering the demands and goals of the intended end-user that are of completely different specializations. To address this risk, we suggest using Par-ticipatory Design (PD) as a methodology for creating and designing ARTSS. By putting the collaborators as co-designers in the authoring of augmented reality prototypes, we can verify that the final ARTSS meets the end-user's requirements.", "fno": "536500a244", "keywords": [ "Augmented Reality", "Groupware", "Augmented Reality Prototypes", "Augmented Reality Training And Support Systems", "Co Designers", "Completely Different Specializations", "Context Scenarios", "Creating Designing ARTSS", "Final ARTSS", "Intended End User", "Participatory Design Methodology", "Training", "Prototypes", "Surgery", "Collaboration", "Stakeholders", "Iterative Methods", "Augmented Reality", "Participatory Design", "Iterative Pro Totyping", "Surgery Training", "Occupational Therapy", "Human Centered Computing", "Interaction Design", "Interaction Design Process And Methods", "Human Computer Interaction HCI", "Interaction Paradigms", "Mixed Augmented Reality" ], "authors": [ { "affiliation": "Nara Institute of Science and Technology,Japan", "fullName": "Isidro III Butaslac", "givenName": "Isidro III", "surname": 
"Butaslac", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Trento,Italy", "fullName": "Alessandro Luchetti", "givenName": "Alessandro", "surname": "Luchetti", "__typename": "ArticleAuthorType" }, { "affiliation": "Nara Institute of Science and Technology,Japan", "fullName": "Junya Ino", "givenName": "Junya", "surname": "Ino", "__typename": "ArticleAuthorType" }, { "affiliation": "Nara Institute of Science and Technology,Japan", "fullName": "Yuichiro Fujimoto", "givenName": "Yuichiro", "surname": "Fujimoto", "__typename": "ArticleAuthorType" }, { "affiliation": "Nara Institute of Science and Technology,Japan", "fullName": "Taishi Sawabe", "givenName": "Taishi", "surname": "Sawabe", "__typename": "ArticleAuthorType" }, { "affiliation": "Nara Institute of Science and Technology,Japan", "fullName": "Masayuki Kanbara", "givenName": "Masayuki", "surname": "Kanbara", "__typename": "ArticleAuthorType" }, { "affiliation": "Nara Institute of Science and Technology,Japan", "fullName": "Hirokazu Kato", "givenName": "Hirokazu", "surname": "Kato", "__typename": "ArticleAuthorType" }, { "affiliation": "Osaka University Graduate School of Medicine.,Japan", "fullName": "Keisuke Uemura", "givenName": "Keisuke", "surname": "Uemura", "__typename": "ArticleAuthorType" }, { "affiliation": "Nara Institute of Science and Technology,Japan", "fullName": "Yoshito Otake", "givenName": "Yoshito", "surname": "Otake", "__typename": "ArticleAuthorType" }, { "affiliation": "Nara Institute of Science and Technology,Japan", "fullName": "Yoshinobu Sato", "givenName": "Yoshinobu", "surname": "Sato", "__typename": "ArticleAuthorType" }, { "affiliation": "Ehime University Graduate School of Medicine,Japan", "fullName": "Masaki Takao", "givenName": "Masaki", "surname": "Takao", "__typename": "ArticleAuthorType" }, { "affiliation": "Osaka University Graduate School of Medicine.,Japan", "fullName": "Nobuhiko Sugano", "givenName": "Nobuhiko", "surname": "Sugano", "__typename": 
"ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-10-01T00:00:00", "pubType": "proceedings", "pages": "244-248", "year": "2022", "issn": null, "isbn": "978-1-6654-5365-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "536500a238", "articleId": "1J7W8EXefza", "__typename": "AdjacentArticleType" }, "next": { "fno": "536500a249", "articleId": "1J7WceEw2di", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2005/8929/0/01492779", "title": "Template based authoring for AR based service scenarios", "doi": null, "abstractUrl": "/proceedings-article/vr/2005/01492779/12OmNAJ4pfX", "parentPublication": { "id": "proceedings/vr/2005/8929/0", "title": "IEEE Virtual Reality 2005", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2013/2869/0/06671807", "title": "User awareness of tracking uncertainties in AR navigation scenarios", "doi": null, "abstractUrl": "/proceedings-article/ismar/2013/06671807/12OmNqFJhRx", "parentPublication": { "id": "proceedings/ismar/2013/2869/0", "title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isuvr/2011/4420/0/4420b009", "title": "Mirror Worlds: Experimenting with Heterogeneous AR", "doi": null, "abstractUrl": "/proceedings-article/isuvr/2011/4420b009/12OmNzFv4kl", "parentPublication": { "id": "proceedings/isuvr/2011/4420/0", "title": "International Symposium on Ubiquitous Virtual Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/svr/2011/4445/0/4445a178", "title": "RPR-SORS: An Authoring Toolkit for Photorealistic AR", "doi": null, 
"abstractUrl": "/proceedings-article/svr/2011/4445a178/12OmNzayN9c", "parentPublication": { "id": "proceedings/svr/2011/4445/0", "title": "2011 XIII Symposium on Virtual Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/svr/2016/4149/0/4149a071", "title": "A User Perspective Analysis on Augmented vs 3D Printed Prototypes for Product's Project Design", "doi": null, "abstractUrl": "/proceedings-article/svr/2016/4149a071/12OmNzcPAeW", "parentPublication": { "id": "proceedings/svr/2016/4149/0", "title": "2016 XVIII Symposium on Virtual and Augmented Reality (SVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/11/08007246", "title": "AR Feels &#x201c;Softer&#x201d; than VR: Haptic Perception of Stiffness in Augmented versus Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2017/11/08007246/13rRUwh80Hj", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2008/04/mcg2008040040", "title": "Toward Next-Gen Mobile AR Games", "doi": null, "abstractUrl": "/magazine/cg/2008/04/mcg2008040040/13rRUxASujW", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a876", "title": "HoloCMDS: Investigating Around Field of View Glanceable Commands Selection in AR-HMDs", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a876/1CJdZ8RwdnG", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2020/7675/0/767500a019", 
"title": "AR Mini-Games for Supermarkets", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a019/1pBMf0WeeVa", "parentPublication": { "id": "proceedings/ismar-adjunct/2020/7675/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2020/0497/0/049700a340", "title": "Human-Object Interaction in AR", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2020/049700a340/1vg7QG06UcE", "parentPublication": { "id": "proceedings/icvrv/2020/0497/0", "title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyUWQR6", "title": "Virtual Reality Annual International Symposium", "acronym": "vrais", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "1993", "__typename": "ProceedingType" }, "article": { "id": "12OmNC3FGbs", "doi": "10.1109/VRAIS.1993.380805", "title": "Adding reality to the virtual", "normalizedTitle": "Adding reality to the virtual", "abstract": "Distributed interactive simulation provides an environment for realistic participation in virtual worlds. Humans interact with the virtual world through interface devices such as switches and knobs, keyboards and mice, touch screens and data gloves. The time has come for the seamless integration of these physical, real-world human interface devices with the systems that generate and display the virtual environments. The merging of these two areas will result in virtual world experiences more realistic than any available today.", "abstracts": [ { "abstractType": "Regular", "content": "Distributed interactive simulation provides an environment for realistic participation in virtual worlds. Humans interact with the virtual world through interface devices such as switches and knobs, keyboards and mice, touch screens and data gloves. The time has come for the seamless integration of these physical, real-world human interface devices with the systems that generate and display the virtual environments. The merging of these two areas will result in virtual world experiences more realistic than any available today.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Distributed interactive simulation provides an environment for realistic participation in virtual worlds. Humans interact with the virtual world through interface devices such as switches and knobs, keyboards and mice, touch screens and data gloves. 
The time has come for the seamless integration of these physical, real-world human interface devices with the systems that generate and display the virtual environments. The merging of these two areas will result in virtual world experiences more realistic than any available today.", "fno": "00380805", "keywords": [ "Real World Human Interface Devices", "Distributed Interactive Simulation", "Virtual Reality", "Virtual Worlds", "Interface Devices", "Switches", "Knobs", "Keyboards", "Mice", "Touch Screens", "Data Gloves" ], "authors": [ { "affiliation": "LORAL Adv. Distributed Simulation, Inc., Cambridge, MA, USA", "fullName": "P.J. Metzger", "givenName": "P.J.", "surname": "Metzger", "__typename": "ArticleAuthorType" } ], "idPrefix": "vrais", "isOpenAccess": false, "showRecommendedArticles": false, "showBuyMe": true, "hasPdf": true, "pubDate": "1993-09-01T00:00:00", "pubType": "proceedings", "pages": "7-13", "year": "1993", "issn": null, "isbn": "0-7803-1363-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "00380804", "articleId": "12OmNyqiaUa", "__typename": "AdjacentArticleType" }, "next": { "fno": "00380806", "articleId": "12OmNwxlreo", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "13bd1eJgoia", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "13bd1eTtWYT", "doi": "10.1109/VR.2018.8446250", "title": "Effects of Hand Representations for Typing in Virtual Reality", "normalizedTitle": "Effects of Hand Representations for Typing in Virtual Reality", "abstract": "Alphanumeric text entry is a challenge for Virtual Reality (VR) applications. VR enables new capabilities, impossible in the real world, such as an unobstructed view of the keyboard, without occlusion by the user's physical hands. Several hand representations have been proposed for typing in VR on standard physical keyboards. However, to date, these hand representations have not been compared regarding their performance and effects on presence for VR text entry. Our work addresses this gap by comparing existing hand representations with minimalistic fingertip visualization. We study the effects of four hand representations (no hand representation, inverse kinematic model, fingertip visualization using spheres and video inlay) on typing in VR using a standard physical keyboard with 24 participants. We found that the fingertip visualization and video inlay both resulted in statistically significant lower text entry error rates compared to no hand or inverse kinematic model representations. We found no statistical differences in text entry speed.", "abstracts": [ { "abstractType": "Regular", "content": "Alphanumeric text entry is a challenge for Virtual Reality (VR) applications. VR enables new capabilities, impossible in the real world, such as an unobstructed view of the keyboard, without occlusion by the user's physical hands. Several hand representations have been proposed for typing in VR on standard physical keyboards. 
However, to date, these hand representations have not been compared regarding their performance and effects on presence for VR text entry. Our work addresses this gap by comparing existing hand representations with minimalistic fingertip visualization. We study the effects of four hand representations (no hand representation, inverse kinematic model, fingertip visualization using spheres and video inlay) on typing in VR using a standard physical keyboard with 24 participants. We found that the fingertip visualization and video inlay both resulted in statistically significant lower text entry error rates compared to no hand or inverse kinematic model representations. We found no statistical differences in text entry speed.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Alphanumeric text entry is a challenge for Virtual Reality (VR) applications. VR enables new capabilities, impossible in the real world, such as an unobstructed view of the keyboard, without occlusion by the user's physical hands. Several hand representations have been proposed for typing in VR on standard physical keyboards. However, to date, these hand representations have not been compared regarding their performance and effects on presence for VR text entry. Our work addresses this gap by comparing existing hand representations with minimalistic fingertip visualization. We study the effects of four hand representations (no hand representation, inverse kinematic model, fingertip visualization using spheres and video inlay) on typing in VR using a standard physical keyboard with 24 participants. We found that the fingertip visualization and video inlay both resulted in statistically significant lower text entry error rates compared to no hand or inverse kinematic model representations. 
We found no statistical differences in text entry speed.", "fno": "08446250", "keywords": [ "Keyboards", "Text Analysis", "User Interfaces", "Virtual Reality", "Hand Representation", "VR Text Entry", "Standard Physical Keyboard", "Inverse Kinematic Model Representations", "Alphanumeric Text Entry", "Virtual Reality Applications", "Hand Representations", "Keyboards", "Visualization", "Error Analysis", "Decoding", "Virtual Reality", "Standards", "Electronic Mail", "H 5 2 User Interfaces Input Devices And Strategies" ], "authors": [ { "affiliation": "Coburg University of Applied Sciences and Arts", "fullName": "Jens Grubert", "givenName": "Jens", "surname": "Grubert", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Passau", "fullName": "Lukas Witzani", "givenName": "Lukas", "surname": "Witzani", "__typename": "ArticleAuthorType" }, { "affiliation": "Microsoft Research", "fullName": "Eyal Ofek", "givenName": "Eyal", "surname": "Ofek", "__typename": "ArticleAuthorType" }, { "affiliation": "Microsoft Research", "fullName": "Michel Pahud", "givenName": "Michel", "surname": "Pahud", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Passau", "fullName": "Matthias Kranz", "givenName": "Matthias", "surname": "Kranz", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Cambridge", "fullName": "Per Ola Kristensson", "givenName": "Per Ola", "surname": "Kristensson", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-03-01T00:00:00", "pubType": "proceedings", "pages": "151-158", "year": "2018", "issn": null, "isbn": "978-1-5386-3365-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08446177", "articleId": "13bd1eSlyu1", "__typename": "AdjacentArticleType" }, "next": { "fno": "08446059", "articleId": "13bd1eSlysI", "__typename": 
"AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2002/1492/0/14920287", "title": "Tinmith-Hand: Unified User Interface Technology for Mobile Outdoor Augmented Reality and Indoor Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2002/14920287/12OmNqH9htu", "parentPublication": { "id": "proceedings/vr/2002/1492/0", "title": "Proceedings IEEE Virtual Reality 2002", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446217", "title": "Effects of Image Size and Structural Complexity on Time and Precision of Hand Movements in Head Mounted Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446217/13bd1AITn9W", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446059", "title": "Text Entry in Immersive Head-Mounted Display-Based Virtual Reality Using Standard Keyboards", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446059/13bd1eSlysI", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/11/08456570", "title": "PizzaText: Text Entry for Virtual Reality Systems Using Dual Thumbsticks", "doi": null, "abstractUrl": "/journal/tg/2018/11/08456570/14M3DYGRu3o", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/11/09874256", "title": "Efficient Flower Text Entry in Virtual Reality", "doi": null, "abstractUrl": 
"/journal/tg/2022/11/09874256/1GjwONKhl84", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a008", "title": "Exploring the Impact of Visual Information on Intermittent Typing in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a008/1JrR2KZbVXq", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10049695", "title": "CrowbarLimbs: A Fatigue-Reducing Virtual Reality Text Entry Metaphor", "doi": null, "abstractUrl": "/journal/tg/2023/05/10049695/1KYowtn3pok", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797740", "title": "Towards Utilizing Touch-sensitive Physical Keyboards for Text Entry in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797740/1cJ196OGdJm", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/11/08794572", "title": "ReconViguRation: Reconfiguring Physical Keyboards in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2019/11/08794572/1dXEHv0aKMo", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a387", "title": "Evaluating Text Entry in Virtual 
Reality using a Touch-sensitive Physical Keyboard", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a387/1gyslQzq07K", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1KmF7rVz6Y8", "title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "acronym": "aivr", "groupId": "1830004", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1KmF8k8WXi8", "doi": "10.1109/AIVR56993.2022.00028", "title": "Direct Interaction Word-Gesture Text Input in Virtual Reality", "normalizedTitle": "Direct Interaction Word-Gesture Text Input in Virtual Reality", "abstract": "As Virtual Reality (VR) devices become more affordable and experience a more widespread adoption, applications for this immersive technology become more diverse, increasingly also including text-based uses, such as messaging and text processing. While text input methods for VR have existed for a long time, none have reached the ease-of-use or performance of conventional, physical keyboards. In the absence of physical keyboards, touch-screen devices have adopted word-gesture keyboards, that allow users to input text through smooth motions. In this work, we introduce a word-gesture keyboard for VR that uses direct interaction with a virtual keyboard, to allow a more direct transfer of hand motion. We present a preliminary evaluation with promising results, which suggest usability improvements over more well established methods of text input in VR.", "abstracts": [ { "abstractType": "Regular", "content": "As Virtual Reality (VR) devices become more affordable and experience a more widespread adoption, applications for this immersive technology become more diverse, increasingly also including text-based uses, such as messaging and text processing. While text input methods for VR have existed for a long time, none have reached the ease-of-use or performance of conventional, physical keyboards. In the absence of physical keyboards, touch-screen devices have adopted word-gesture keyboards, that allow users to input text through smooth motions. 
In this work, we introduce a word-gesture keyboard for VR that uses direct interaction with a virtual keyboard, to allow a more direct transfer of hand motion. We present a preliminary evaluation with promising results, which suggest usability improvements over more well established methods of text input in VR.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "As Virtual Reality (VR) devices become more affordable and experience a more widespread adoption, applications for this immersive technology become more diverse, increasingly also including text-based uses, such as messaging and text processing. While text input methods for VR have existed for a long time, none have reached the ease-of-use or performance of conventional, physical keyboards. In the absence of physical keyboards, touch-screen devices have adopted word-gesture keyboards, that allow users to input text through smooth motions. In this work, we introduce a word-gesture keyboard for VR that uses direct interaction with a virtual keyboard, to allow a more direct transfer of hand motion. 
We present a preliminary evaluation with promising results, which suggest usability improvements over more well established methods of text input in VR.", "fno": "572500a140", "keywords": [ "Gesture Recognition", "Handicapped Aids", "Keyboards", "Text Analysis", "Touch Sensitive Screens", "User Interfaces", "Virtual Reality", "Conventional Keyboards", "Direct Interaction Word Gesture Text Input", "Direct Transfer", "Ease Of Use", "Immersive Technology", "Input Text", "Messaging", "Physical Keyboards", "Text Input Methods", "Text Processing", "Text Based Uses", "Touch Screen Devices", "Virtual Keyboard", "Virtual Reality Devices", "VR", "Word Gesture Keyboard", "Performance Evaluation", "Keyboards", "Virtual Reality", "Usability", "Artificial Intelligence", "Text Processing", "Virtual Reality", "Text Input", "Word Gestures" ], "authors": [ { "affiliation": "University of Basel,Department of Mathematics and Computer Science,Basel,Switzerland", "fullName": "Florian Spiess", "givenName": "Florian", "surname": "Spiess", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Basel,Department of Mathematics and Computer Science,Basel,Switzerland", "fullName": "Philipp Weber", "givenName": "Philipp", "surname": "Weber", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Basel,Department of Mathematics and Computer Science,Basel,Switzerland", "fullName": "Heiko Schuldt", "givenName": "Heiko", "surname": "Schuldt", "__typename": "ArticleAuthorType" } ], "idPrefix": "aivr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-12-01T00:00:00", "pubType": "proceedings", "pages": "140-143", "year": "2022", "issn": null, "isbn": "978-1-6654-5725-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "572500a135", "articleId": "1KmFfqArfGM", "__typename": "AdjacentArticleType" }, "next": { "fno": "572500a144", "articleId": 
"1KmFetCHntS", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2018/3365/0/08446059", "title": "Text Entry in Immersive Head-Mounted Display-Based Virtual Reality Using Standard Keyboards", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446059/13bd1eSlysI", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446250", "title": "Effects of Hand Representations for Typing in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446250/13bd1eTtWYT", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/5555/01/09737726", "title": "MyoKey: Inertial Motion Sensing and Gesture-based QWERTY Keyboard for Extended Realities", "doi": null, "abstractUrl": "/journal/tm/5555/01/09737726/1BQlEBR0ceY", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a702", "title": "Personalization of a Mid-Air Gesture Keyboard using Multi-Objective Bayesian Optimization", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a702/1JrQW09ujvi", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10049665", "title": "Text Input for Non-Stationary XR Workspaces: Investigating Tap and Word-Gesture Keyboards in Virtual and Augmented 
Reality", "doi": null, "abstractUrl": "/journal/tg/2023/05/10049665/1KYooqYQbF6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797740", "title": "Towards Utilizing Touch-sensitive Physical Keyboards for Text Entry in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797740/1cJ196OGdJm", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797754", "title": "A Capacitive-sensing Physical Keyboard for VR Text Entry", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797754/1cJ1cJDgPXq", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/11/08794572", "title": "ReconViguRation: Reconfiguring Physical Keyboards in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2019/11/08794572/1dXEHv0aKMo", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a387", "title": "Evaluating Text Entry in Virtual Reality using a Touch-sensitive Physical Keyboard", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a387/1gyslQzq07K", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800a393", "title": "Simulating Realistic Human Motion Trajectories of Mid-Air Gesture Typing", "doi": null, "abstractUrl": "/proceedings-article/ismar/2021/015800a393/1yeCVRK9bri", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1gyshXRzHpK", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "1810084", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1gyslQzq07K", "doi": "10.1109/ISMAR-Adjunct.2019.000-4", "title": "Evaluating Text Entry in Virtual Reality using a Touch-sensitive Physical Keyboard", "normalizedTitle": "Evaluating Text Entry in Virtual Reality using a Touch-sensitive Physical Keyboard", "abstract": "Text entry is a challenge for Virtual Reality (VR) applications. In the context of immersive VR Head-Mounted Displays, text entry has been investigated for standard physical keyboards as well as for various hand representations. Specifically, prior work has indicated that minimalistic fingertip visualizations is an efficient hand representation. However, these representations typically require external tracking systems. Touch-sensitive physical keyboards allow for on-surface interaction, with sensing integrated into the keyboard itself. However, they have not been thoroughly investigated within VR. We close this gap by comparing text entry on a standard physical keyboard and a touch-sensitive physical keyboard in a controlled user study (n=26). Our results indicate that text entry using touch-sensitive physical keyboards can be as efficient as the fingertip visualization, but that results vary between experienced and inexperienced typists.", "abstracts": [ { "abstractType": "Regular", "content": "Text entry is a challenge for Virtual Reality (VR) applications. In the context of immersive VR Head-Mounted Displays, text entry has been investigated for standard physical keyboards as well as for various hand representations. Specifically, prior work has indicated that minimalistic fingertip visualizations is an efficient hand representation. 
However, these representations typically require external tracking systems. Touch-sensitive physical keyboards allow for on-surface interaction, with sensing integrated into the keyboard itself. However, they have not been thoroughly investigated within VR. We close this gap by comparing text entry on a standard physical keyboard and a touch-sensitive physical keyboard in a controlled user study (n=26). Our results indicate that text entry using touch-sensitive physical keyboards can be as efficient as the fingertip visualization, but that results vary between experienced and inexperienced typists.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Text entry is a challenge for Virtual Reality (VR) applications. In the context of immersive VR Head-Mounted Displays, text entry has been investigated for standard physical keyboards as well as for various hand representations. Specifically, prior work has indicated that minimalistic fingertip visualizations is an efficient hand representation. However, these representations typically require external tracking systems. Touch-sensitive physical keyboards allow for on-surface interaction, with sensing integrated into the keyboard itself. However, they have not been thoroughly investigated within VR. We close this gap by comparing text entry on a standard physical keyboard and a touch-sensitive physical keyboard in a controlled user study (n=26). 
Our results indicate that text entry using touch-sensitive physical keyboards can be as efficient as the fingertip visualization, but that results vary between experienced and inexperienced typists.", "fno": "476500a387", "keywords": [ "Helmet Mounted Displays", "Keyboards", "Text Analysis", "Touch Sensitive Screens", "Virtual Reality", "Touch Sensitive Physical Keyboard", "Virtual Reality Applications", "Immersive VR Head Mounted Displays", "Standard Physical Keyboard", "Efficient Hand Representation", "Text Entry Evaluation", "On Surface Interaction", "Fingertip Visualization", "Keyboards", "Sensors", "Visualization", "Pins", "Resists", "Standards", "Task Analysis" ], "authors": [ { "affiliation": "Coburg University of Applied Sciences and Arts", "fullName": "Alexander Otte", "givenName": "Alexander", "surname": "Otte", "__typename": "ArticleAuthorType" }, { "affiliation": "Coburg University of Applied Sciences and Arts", "fullName": "Daniel Schneider", "givenName": "Daniel", "surname": "Schneider", "__typename": "ArticleAuthorType" }, { "affiliation": "Coburg University of Applied Sciences and Arts", "fullName": "Tim Menzner", "givenName": "Tim", "surname": "Menzner", "__typename": "ArticleAuthorType" }, { "affiliation": "Coburg University of Applied Sciences and Arts", "fullName": "Travis Gesslein", "givenName": "Travis", "surname": "Gesslein", "__typename": "ArticleAuthorType" }, { "affiliation": "Coburg University of Applied Sciences and Arts", "fullName": "Philipp Gagel", "givenName": "Philipp", "surname": "Gagel", "__typename": "ArticleAuthorType" }, { "affiliation": "Coburg University of Applied Sciences and Arts", "fullName": "Jens Grubert", "givenName": "Jens", "surname": "Grubert", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-10-01T00:00:00", "pubType": "proceedings", "pages": "387-392", "year": "2019", "issn": null, "isbn": 
"978-1-7281-4765-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "476500a384", "articleId": "1gysog0WnXG", "__typename": "AdjacentArticleType" }, "next": { "fno": "476500a393", "articleId": "1gysjIlsYus", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2018/3365/0/08446059", "title": "Text Entry in Immersive Head-Mounted Display-Based Virtual Reality Using Standard Keyboards", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446059/13bd1eSlysI", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/compsac/2018/2666/1/266601a339", "title": "A Japanese Software Keyboard for Tablets that Reduces User Fatigue", "doi": null, "abstractUrl": "/proceedings-article/compsac/2018/266601a339/144U9b07hJP", "parentPublication": { "id": "proceedings/compsac/2018/2666/2", "title": "2018 IEEE 42nd Annual Computer Software and Applications Conference (COMPSAC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a694", "title": "From 2D to 3D: Facilitating Single-Finger Mid-Air Typing on Virtual Keyboards with Probabilistic Touch Modeling", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a694/1CJf9WRhN84", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a702", "title": "Personalization of a Mid-Air Gesture Keyboard using Multi-Objective Bayesian Optimization", "doi": null, "abstractUrl": 
"/proceedings-article/ismar/2022/532500a702/1JrQW09ujvi", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a864", "title": "Efficient Special Character Entry on a Virtual Keyboard by Hand Gesture-Based Mode Switching", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a864/1JrRlYtjd9C", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icnisc/2022/5351/0/535100a753", "title": "Design of a Programmable Touch Keyboard", "doi": null, "abstractUrl": "/proceedings-article/icnisc/2022/535100a753/1KYtpnuy7Kg", "parentPublication": { "id": "proceedings/icnisc/2022/5351/0", "title": "2022 8th Annual International Conference on Network and Information Systems for Computers (ICNISC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/percom/2019/9148/0/08767420", "title": "HIBEY: Hide the Keyboard in Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/percom/2019/08767420/1bQzm74HXBm", "parentPublication": { "id": "proceedings/percom/2019/9148/0", "title": "2019 IEEE International Conference on Pervasive Computing and Communications (PerCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797740", "title": "Towards Utilizing Touch-sensitive Physical Keyboards for Text Entry in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797740/1cJ196OGdJm", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797754", "title": "A Capacitive-sensing Physical Keyboard for VR Text Entry", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797754/1cJ1cJDgPXq", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/11/08794572", "title": "ReconViguRation: Reconfiguring Physical Keyboards in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2019/11/08794572/1dXEHv0aKMo", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "181W9lQFnW5", "title": "2016 International Conference on Virtual Reality and Visualization (ICVRV)", "acronym": "icvrv", "groupId": "1800579", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "1j9x4YQ2tdS", "doi": "10.1109/ICVRV.2016.81", "title": "One Bit Mouse for Virtual Reality", "normalizedTitle": "One Bit Mouse for Virtual Reality", "abstract": "Due to the dominance of physical and cognitive health problems the group of growing age people is becoming promising in the entire population. Nowadays, Virtual reality is an emerging paradigm that is trying in context to enhance their independence and quality of life. But it is not so easy to interact with the VR applications. When you wear the virtual glasses you won't be able to see the physical world in real. The common user problem is to use the input devices (keyboard, mouse, remote control) to change the menu and access the different features of VR. But in case of people with special needs they will not be able to handle interaction with such devices. To make maximum use of virtual reality technique effective, we are proposing one bit virtual mouse. By using one key, virtual mouse provides the control access to VR application with ease as well as training environment for the users. The primary evaluation of the system is done with the experiment. The post experiment results and disable people feedback show performance of the system is satisfactory.", "abstracts": [ { "abstractType": "Regular", "content": "Due to the dominance of physical and cognitive health problems the group of growing age people is becoming promising in the entire population. Nowadays, Virtual reality is an emerging paradigm that is trying in context to enhance their independence and quality of life. But it is not so easy to interact with the VR applications. When you wear the virtual glasses you won't be able to see the physical world in real. 
The common user problem is to use the input devices (keyboard, mouse, remote control) to change the menu and access the different features of VR. But in case of people with special needs they will not be able to handle interaction with such devices. To make maximum use of virtual reality technique effective, we are proposing one bit virtual mouse. By using one key, virtual mouse provides the control access to VR application with ease as well as training environment for the users. The primary evaluation of the system is done with the experiment. The post experiment results and disable people feedback show performance of the system is satisfactory.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Due to the dominance of physical and cognitive health problems the group of growing age people is becoming promising in the entire population. Nowadays, Virtual reality is an emerging paradigm that is trying in context to enhance their independence and quality of life. But it is not so easy to interact with the VR applications. When you wear the virtual glasses you won't be able to see the physical world in real. The common user problem is to use the input devices (keyboard, mouse, remote control) to change the menu and access the different features of VR. But in case of people with special needs they will not be able to handle interaction with such devices. To make maximum use of virtual reality technique effective, we are proposing one bit virtual mouse. By using one key, virtual mouse provides the control access to VR application with ease as well as training environment for the users. The primary evaluation of the system is done with the experiment. 
The post experiment results and disable people feedback show performance of the system is satisfactory.", "fno": "5188a442", "keywords": [ "Handicapped Aids", "Mouse Controllers Computers", "Virtual Reality", "Physical Health", "Cognitive Health", "VR Applications", "Virtual Glasses", "Input Devices", "One Bit Virtual Mouse", "Control Access", "Training Environment", "Disable People Feedback", "Mice", "Glass", "Virtual Reality", "Interactive Systems", "Training", "Hardware", "Keyboards", "Virtual Reality", "Disability", "Virtual Mouse", "Human Computer Interaction" ], "authors": [ { "affiliation": "Dept. of Comput. Sci., Tsinghua Univ., Beijing, China", "fullName": "Farzana Jabeen", "givenName": "Farzana", "surname": "Jabeen", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Comput. Sci., Tsinghua Univ., Beijing, China", "fullName": "Linmi Tao", "givenName": "Linmi", "surname": "Tao", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Comput. Sci., BUPT, Beijing, China", "fullName": "Linlin Tian", "givenName": "Linlin", "surname": "Tian", "__typename": "ArticleAuthorType" } ], "idPrefix": "icvrv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2016-09-01T00:00:00", "pubType": "proceedings", "pages": "442-446", "year": "2016", "issn": null, "isbn": "978-1-5090-5188-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "5188a364", "articleId": "181W9od8p3j", "__typename": "AdjacentArticleType" }, "next": null, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vrais/1993/1363/0/00380805", "title": "Adding reality to the virtual", "doi": null, "abstractUrl": "/proceedings-article/vrais/1993/00380805/12OmNC3FGbs", "parentPublication": { "id": "proceedings/vrais/1993/1363/0", "title": "Virtual Reality Annual International Symposium", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/icoin/2018/2290/0/08343267", "title": "Immersive gesture interfaces for 3D map navigation in HMD-based virtual environments", "doi": null, "abstractUrl": "/proceedings-article/icoin/2018/08343267/12OmNvD8Rwt", "parentPublication": { "id": "proceedings/icoin/2018/2290/0", "title": "2018 International Conference on Information Networking (ICOIN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2007/0905/0/04161002", "title": "Interscopic User Interface Concepts for Fish Tank Virtual Reality Systems", "doi": null, "abstractUrl": "/proceedings-article/vr/2007/04161002/12OmNvkplbf", "parentPublication": { "id": "proceedings/vr/2007/0905/0", "title": "2007 IEEE Virtual Reality Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vissoft/2015/7526/0/07332423", "title": "Exploring software cities in virtual reality", "doi": null, "abstractUrl": "/proceedings-article/vissoft/2015/07332423/12OmNyr8Yuv", "parentPublication": { "id": "proceedings/vissoft/2015/7526/0", "title": "2015 IEEE 3rd Working Conference on Software Visualization (VISSOFT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2017/0831/0/0831a440", "title": "Using Real Objects for Interaction in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/iv/2017/0831a440/12OmNzGlREo", "parentPublication": { "id": "proceedings/iv/2017/0831/0", "title": "2017 21st International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/02/07833028", "title": "Augmented Reality versus Virtual Reality for 3D Object Manipulation", "doi": null, "abstractUrl": "/journal/tg/2018/02/07833028/13rRUwInvsX", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & 
Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2022/5725/0/572500a140", "title": "Direct Interaction Word-Gesture Text Input in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/aivr/2022/572500a140/1KmF8k8WXi8", "parentPublication": { "id": "proceedings/aivr/2022/5725/0", "title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2022/5725/0/572500a171", "title": "A Qualitative Analysis of Interaction Techniques in a Virtual Reality Instruction Environment: Experiences From a Case Study", "doi": null, "abstractUrl": "/proceedings-article/aivr/2022/572500a171/1KmFghvd14s", "parentPublication": { "id": "proceedings/aivr/2022/5725/0", "title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrhciai/2022/9182/0/918200a134", "title": "MIND-VR: A Utility Approach of Human-Computer Interaction in Virtual Space based on Autonomous Consciousness", "doi": null, "abstractUrl": "/proceedings-article/vrhciai/2022/918200a134/1LxffWquCrK", "parentPublication": { "id": "proceedings/vrhciai/2022/9182/0", "title": "2022 International Conference on Virtual Reality, Human-Computer Interaction and Artificial Intelligence (VRHCIAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/11/08794572", "title": "ReconViguRation: Reconfiguring Physical Keyboards in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2019/11/08794572/1dXEHv0aKMo", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNsbGvCZ", "title": "2011 IEEE International Symposium on VR Innovation (ISVRI)", "acronym": "isvri", "groupId": "1800344", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNBhpS7O", "doi": "10.1109/ISVRI.2011.5759630", "title": "Over-parameterized method on variational surface for point-based reconstruction", "normalizedTitle": "Over-parameterized method on variational surface for point-based reconstruction", "abstract": "This article tackles the problem of using varitional method for reconstructing 3D surface. We give an overview of over-parameterized method on variational surface when the 3D surface is represented by a point-based surface and a triangular mesh-based surface, and we detail the variational surface which embody used on surface reconstruction. In particular, we show how to rigorously account for efficiency in the surface reconstructing process. The key idea is to consider utilizing variational surface from a finite number of normals for surface reconstruction. We investigate the properties of the regularization functional and illustrate our technique by applying it to converge to the input shape as the number of measurements increases. In contrast to previous works our method can be considered on point-based surface and allows the use of over-parameterized scheme for 3D surface reconstruction. Experimental results show the proposed method is efficient.", "abstracts": [ { "abstractType": "Regular", "content": "This article tackles the problem of using varitional method for reconstructing 3D surface. We give an overview of over-parameterized method on variational surface when the 3D surface is represented by a point-based surface and a triangular mesh-based surface, and we detail the variational surface which embody used on surface reconstruction. In particular, we show how to rigorously account for efficiency in the surface reconstructing process. 
The key idea is to consider utilizing variational surface from a finite number of normals for surface reconstruction. We investigate the properties of the regularization functional and illustrate our technique by applying it to converge to the input shape as the number of measurements increases. In contrast to previous works our method can be considered on point-based surface and allows the use of over-parameterized scheme for 3D surface reconstruction. Experimental results show the proposed method is efficient.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This article tackles the problem of using varitional method for reconstructing 3D surface. We give an overview of over-parameterized method on variational surface when the 3D surface is represented by a point-based surface and a triangular mesh-based surface, and we detail the variational surface which embody used on surface reconstruction. In particular, we show how to rigorously account for efficiency in the surface reconstructing process. The key idea is to consider utilizing variational surface from a finite number of normals for surface reconstruction. We investigate the properties of the regularization functional and illustrate our technique by applying it to converge to the input shape as the number of measurements increases. In contrast to previous works our method can be considered on point-based surface and allows the use of over-parameterized scheme for 3D surface reconstruction. 
Experimental results show the proposed method is efficient.", "fno": "05759630", "keywords": [ "Mesh Generation", "Solid Modelling", "Over Parameterized Method", "Point Based Reconstruction", "Variational Surface", "Point Based Surface", "Triangular Mesh Based Surface", "Surface Reconstructing Process", "3 D Surface Reconstruction", "Surface Reconstruction", "Surface Treatment", "Three Dimensional Displays", "Shape", "Solid Modeling", "Machine Intelligence", "Over Parameterized Method", "Variational Scheme", "Point Based Reconstruction" ], "authors": [ { "affiliation": "School of Communication and Information Engineering, Shanghai University, 200072, China", "fullName": "Longcun Jin", "givenName": "Longcun", "surname": "Jin", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Communication and Information Engineering, Shanghai University, 200072, China", "fullName": "Wanggen Wan", "givenName": "Wanggen", "surname": "Wan", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Communication and Information Engineering, Shanghai University, 200072, China", "fullName": "Xiaoqing Yu", "givenName": "Xiaoqing", "surname": "Yu", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Communication and Information Engineering, Shanghai University, 200072, China", "fullName": "Zhenghua Zhou", "givenName": "Zhenghua", "surname": "Zhou", "__typename": "ArticleAuthorType" } ], "idPrefix": "isvri", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-03-01T00:00:00", "pubType": "proceedings", "pages": "193-197", "year": "2011", "issn": null, "isbn": "978-1-4577-0054-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05759629", "articleId": "12OmNyQYtd3", "__typename": "AdjacentArticleType" }, "next": { "fno": "05759632", "articleId": "12OmNwJybR0", "__typename": "AdjacentArticleType" }, "__typename": 
"AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/isdea/2012/4608/0/4608b511", "title": "Surface Reconstruction from Sparse and Mutually Intersected Contours for Freehand 3D Ultrasound Using Variational Method", "doi": null, "abstractUrl": "/proceedings-article/isdea/2012/4608b511/12OmNCmGNWc", "parentPublication": { "id": "proceedings/isdea/2012/4608/0", "title": "2012 Second International Conference on Intelligent System Design and Engineering Application", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2017/2089/0/2089a118", "title": "Anisotropic Surface Reconstruction for Multiphase Fluids", "doi": null, "abstractUrl": "/proceedings-article/cw/2017/2089a118/12OmNCmpcVe", "parentPublication": { "id": "proceedings/cw/2017/2089/0", "title": "2017 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2015/8332/0/8332a264", "title": "3D Surface Reconstruction from Point-and-Line Cloud", "doi": null, "abstractUrl": "/proceedings-article/3dv/2015/8332a264/12OmNrAMEVf", "parentPublication": { "id": "proceedings/3dv/2015/8332/0", "title": "2015 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2015/1986/0/1986a313", "title": "Out-of-Core Surface Reconstruction from Large Point Sets for Infrastructure Inspection", "doi": null, "abstractUrl": "/proceedings-article/crv/2015/1986a313/12OmNwp74Lc", "parentPublication": { "id": "proceedings/crv/2015/1986/0", "title": "2015 12th Conference on Computer and Robot Vision (CRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460427", "title": "Shape reconstruction with globally-optimized surface point selection", "doi": null, "abstractUrl": 
"/proceedings-article/icpr/2012/06460427/12OmNwtEEGb", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cad-graphics/2013/2576/0/06814976", "title": "TV-L1 Optimization for B-Spline Surface Reconstruction with Sharp Features", "doi": null, "abstractUrl": "/proceedings-article/cad-graphics/2013/06814976/12OmNyrZLDi", "parentPublication": { "id": "proceedings/cad-graphics/2013/2576/0", "title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2007/01/i0181", "title": "Variational Surface Interpolation from Sparse Point and Normal Data", "doi": null, "abstractUrl": "/journal/tp/2007/01/i0181/13rRUwI5TSf", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/08/06035704", "title": "Inference-Based Surface Reconstruction of Cluttered Environments", "doi": null, "abstractUrl": "/journal/tg/2012/08/06035704/13rRUwj7cp9", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2007/01/04016561", "title": "Variational Surface Interpolation from Sparse Point and Normal Data", "doi": null, "abstractUrl": "/journal/tp/2007/01/04016561/13rRUy3xY9a", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800a967", "title": "SSRNet: Scalable 3D Surface Reconstruction 
Network", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800a967/1m3nKc80MlG", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyGbI51", "title": "The 24th Southeastern Symposium on System Theory and The 3rd Annual Symposium on Communications, Signal Processing Expert Systems, and ASIC VLSI Design", "acronym": "ssst", "groupId": "1000732", "volume": "0", "displayVolume": "0", "year": "1992", "__typename": "ProceedingType" }, "article": { "id": "12OmNBqMDgU", "doi": "10.1109/SSST.1992.712350", "title": "A Study of the Surface Properties of Epitaxially Grown Indium Phosphide using Photoluminescence", "normalizedTitle": "A Study of the Surface Properties of Epitaxially Grown Indium Phosphide using Photoluminescence", "abstract": "Surface recombination of photogenerated minority carriers is one of the loss mechanism in p-n junction solar cells. Optically excited photoluminescence (PL) was used as a means of inferring the surface recombination velocity (SRV) of doped crystalline indium phosphide, which has been grown by current-controlled liquid phase epitaxy. By monitoring the PL intensity induced by two excitation wavelengths, the bulk diffusion length and surface recombination parameters may be obtained under suitable experimental conditions. Various surface treatments were performed to attempt to reduce the SRV losses. These treatments included chemical etching and thin film formation/deposition. It was observed that some treatments may result in a reduced SRV, but the effect was unstable under atmospheric exposure.", "abstracts": [ { "abstractType": "Regular", "content": "Surface recombination of photogenerated minority carriers is one of the loss mechanism in p-n junction solar cells. Optically excited photoluminescence (PL) was used as a means of inferring the surface recombination velocity (SRV) of doped crystalline indium phosphide, which has been grown by current-controlled liquid phase epitaxy. 
By monitoring the PL intensity induced by two excitation wavelengths, the bulk diffusion length and surface recombination parameters may be obtained under suitable experimental conditions. Various surface treatments were performed to attempt to reduce the SRV losses. These treatments included chemical etching and thin film formation/deposition. It was observed that some treatments may result in a reduced SRV, but the effect was unstable under atmospheric exposure.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Surface recombination of photogenerated minority carriers is one of the loss mechanism in p-n junction solar cells. Optically excited photoluminescence (PL) was used as a means of inferring the surface recombination velocity (SRV) of doped crystalline indium phosphide, which has been grown by current-controlled liquid phase epitaxy. By monitoring the PL intensity induced by two excitation wavelengths, the bulk diffusion length and surface recombination parameters may be obtained under suitable experimental conditions. Various surface treatments were performed to attempt to reduce the SRV losses. These treatments included chemical etching and thin film formation/deposition. It was observed that some treatments may result in a reduced SRV, but the effect was unstable under atmospheric exposure.", "fno": "00712350", "keywords": [ "Indium Phosphide", "Photoluminescence", "Surface Treatment", "Optical Surface Waves", "Chemicals", "Wavelength Measurement", "Photovoltaic Cells", "Optical Films", "Epitaxial Growth", "Surface Waves" ], "authors": [ { "affiliation": "Dept. of Electrical Engineering, North Carolina A & T State University", "fullName": "F. Niranjan", "givenName": "F.", "surname": "Niranjan", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "W.J. Collis", "givenName": "W.J.", "surname": "Collis", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "A. 
Abul-Fadl", "givenName": "A.", "surname": "Abul-Fadl", "__typename": "ArticleAuthorType" } ], "idPrefix": "ssst", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "1992-01-01T00:00:00", "pubType": "proceedings", "pages": "523,524", "year": "1992", "issn": "0094-2898", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "00712349", "articleId": "12OmNBCHMMR", "__typename": "AdjacentArticleType" }, "next": { "fno": "00712352", "articleId": "12OmNC8uRk1", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ccats/2015/8211/0/8211a086", "title": "Surface Roughness Measurement Application Using Multi-frame Techniques", "doi": null, "abstractUrl": "/proceedings-article/ccats/2015/8211a086/12OmNvT2pgA", "parentPublication": { "id": "proceedings/ccats/2015/8211/0", "title": "2015 International Conference on Computer Application Technologies (CCATS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/1990/2057/0/00139539", "title": "Surface shape reconstruction of an undulating transparent object", "doi": null, "abstractUrl": "/proceedings-article/iccv/1990/00139539/12OmNvsm6vi", "parentPublication": { "id": "proceedings/iccv/1990/2057/0", "title": "Proceedings Third International Conference on Computer Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iqec/2005/9240/0/01560879", "title": "Photoluminescence and micro-Raman study of nanohills formed on the surface of Ge by YAG:Nd laser", "doi": null, "abstractUrl": "/proceedings-article/iqec/2005/01560879/12OmNwIpNmW", "parentPublication": { "id": "proceedings/iqec/2005/9240/0", "title": "International Quantum Electronics Conference, 2005.", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/isdea/2014/4261/0/4261a662", "title": "Obliquely Incidence Scattering by an Anisotropic Impedance Wedge: Surface Waves and the Diffraction of the Surface Waves", "doi": null, "abstractUrl": "/proceedings-article/isdea/2014/4261a662/12OmNxb5hul", "parentPublication": { "id": "proceedings/isdea/2014/4261/0", "title": "2014 Fifth International Conference on Intelligent Systems Design and Engineering Applications (ISDEA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2017/1034/0/1034a159", "title": "Modeling the Anisotropic Reflectance of a Surface with Microstructure Engineered to Obtain Visible Contrast After Rotation", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2017/1034a159/12OmNyen1xn", "parentPublication": { "id": "proceedings/iccvw/2017/1034/0", "title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciev/2014/5179/0/06850725", "title": "Surface plasmonic properties in graphene for the variation of chemical potential", "doi": null, "abstractUrl": "/proceedings-article/iciev/2014/06850725/12OmNyugz0G", "parentPublication": { "id": "proceedings/iciev/2014/5179/0", "title": "2014 International Conference on Informatics, Electronics & Vision (ICIEV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvaui/2016/5870/0/5870a037", "title": "Surface Stereo for Shallow Underwater Scenes", "doi": null, "abstractUrl": "/proceedings-article/cvaui/2016/5870a037/12OmNyz5K1v", "parentPublication": { "id": "proceedings/cvaui/2016/5870/0", "title": "2016 ICPR 2nd Workshop on Computer Vision for Analysis of Underwater Imagery (CVAUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdma/2012/4772/0/4772a218", "title": "Analysis of 
the Curved Surface on Surface Roughness Measurement Using Dichromatic Speckle Pattens", "doi": null, "abstractUrl": "/proceedings-article/icdma/2012/4772a218/12OmNz2kqrR", "parentPublication": { "id": "proceedings/icdma/2012/4772/0", "title": "2012 Third International Conference on Digital Manufacturing & Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2016/02/07358132", "title": "On Frictional Forces between the Finger and a Textured Surface during Active Touch", "doi": null, "abstractUrl": "/journal/th/2016/02/07358132/13rRUxZzAhO", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/qe/2022/01/09880558", "title": "Quantum Control of Optically Active Artificial Atoms With Surface Acoustic Waves", "doi": null, "abstractUrl": "/journal/qe/2022/01/09880558/1Gtu21J4Zck", "parentPublication": { "id": "trans/qe", "title": "IEEE Transactions on Quantum Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNy9Prj1", "title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)", "acronym": "iccvw", "groupId": "1800041", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNyen1xn", "doi": "10.1109/ICCVW.2017.27", "title": "Modeling the Anisotropic Reflectance of a Surface with Microstructure Engineered to Obtain Visible Contrast After Rotation", "normalizedTitle": "Modeling the Anisotropic Reflectance of a Surface with Microstructure Engineered to Obtain Visible Contrast After Rotation", "abstract": "Engineering of surface structure to obtain specific anisotropic reflectance properties has interesting applications in large scale production of plastic items. In recent work, surface structure has been engineered to obtain visible reflectance contrast when observing a surface before and after rotating it 90 degrees around its normal axis. We build an analytic anisotropic reflectance model based on the microstructure engineered to obtain such contrast. Using our model to render synthetic images, we predict the above mentioned contrasts and compare our predictions with the measurements reported in previous work. The benefit of an analytical model like the one we provide is its potential to be used in computer vision for estimating the quality of a surface sample. The quality of a sample is indicated by the resemblance of camera-based contrast measurements with contrasts predicted for an idealized surface structure. Our predictive model is also useful in optimization of the microstructure configuration, where the objective for example could be to maximize reflectance contrast.", "abstracts": [ { "abstractType": "Regular", "content": "Engineering of surface structure to obtain specific anisotropic reflectance properties has interesting applications in large scale production of plastic items. 
In recent work, surface structure has been engineered to obtain visible reflectance contrast when observing a surface before and after rotating it 90 degrees around its normal axis. We build an analytic anisotropic reflectance model based on the microstructure engineered to obtain such contrast. Using our model to render synthetic images, we predict the above mentioned contrasts and compare our predictions with the measurements reported in previous work. The benefit of an analytical model like the one we provide is its potential to be used in computer vision for estimating the quality of a surface sample. The quality of a sample is indicated by the resemblance of camera-based contrast measurements with contrasts predicted for an idealized surface structure. Our predictive model is also useful in optimization of the microstructure configuration, where the objective for example could be to maximize reflectance contrast.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Engineering of surface structure to obtain specific anisotropic reflectance properties has interesting applications in large scale production of plastic items. In recent work, surface structure has been engineered to obtain visible reflectance contrast when observing a surface before and after rotating it 90 degrees around its normal axis. We build an analytic anisotropic reflectance model based on the microstructure engineered to obtain such contrast. Using our model to render synthetic images, we predict the above mentioned contrasts and compare our predictions with the measurements reported in previous work. The benefit of an analytical model like the one we provide is its potential to be used in computer vision for estimating the quality of a surface sample. The quality of a sample is indicated by the resemblance of camera-based contrast measurements with contrasts predicted for an idealized surface structure. 
Our predictive model is also useful in optimization of the microstructure configuration, where the objective for example could be to maximize reflectance contrast.", "fno": "1034a159", "keywords": [ "Microstructure", "Rough Surfaces", "Surface Roughness", "Surface Treatment", "Optical Surface Waves", "Predictive Models", "Surface Waves" ], "authors": [ { "affiliation": null, "fullName": "Guido Tosello", "givenName": "Guido", "surname": "Tosello", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jannik Boll Nielsen", "givenName": "Jannik Boll", "surname": "Nielsen", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Henrik Aanæs", "givenName": "Henrik", "surname": "Aanæs", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jeppe Revall Frisvad", "givenName": "Jeppe Revall", "surname": "Frisvad", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Andrea Luongo", "givenName": "Andrea", "surname": "Luongo", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Viggo Falster", "givenName": "Viggo", "surname": "Falster", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Mads Brix Doest", "givenName": "Mads Brix", "surname": "Doest", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Dongya Li", "givenName": "Dongya", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Francesco Regi", "givenName": "Francesco", "surname": "Regi", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yang Zhang", "givenName": "Yang", "surname": "Zhang", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccvw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-10-01T00:00:00", "pubType": "proceedings", "pages": "159-165", "year": "2017", "issn": "2473-9944", "isbn": "978-1-5386-1034-3", "notes": null, "notesType": null, 
"__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "1034a153", "articleId": "12OmNBtUdGa", "__typename": "AdjacentArticleType" }, "next": { "fno": "1034a166", "articleId": "12OmNzWfp0e", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2009/3992/0/05206498", "title": "A unified model of specular and diffuse reflectance for rough, glossy surfaces", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2009/05206498/12OmNvAiShy", "parentPublication": { "id": "proceedings/cvpr/2009/3992/0", "title": "2009 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/1992/2855/0/00223148", "title": "Diffuse reflection (intensity reflectance model)", "doi": null, "abstractUrl": "/proceedings-article/cvpr/1992/00223148/12OmNwlZu1a", "parentPublication": { "id": "proceedings/cvpr/1992/2855/0", "title": "Proceedings 1992 IEEE Computer Society Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ectc/2017/6315/0/07999809", "title": "Critical Factors Affecting Structural Transformations in 3D IC Micro Joints", "doi": null, "abstractUrl": "/proceedings-article/ectc/2017/07999809/12OmNwwuDQ6", "parentPublication": { "id": "proceedings/ectc/2017/6315/0", "title": "2017 IEEE 67th Electronic Components and Technology Conference (ECTC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdma/2012/4772/0/4772a218", "title": "Analysis of the Curved Surface on Surface Roughness Measurement Using Dichromatic Speckle Pattens", "doi": null, "abstractUrl": "/proceedings-article/icdma/2012/4772a218/12OmNz2kqrR", "parentPublication": { "id": "proceedings/icdma/2012/4772/0", "title": "2012 Third International 
Conference on Digital Manufacturing & Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2001/1143/1/00937522", "title": "New perspectives on geometric reflection theory from rough surfaces", "doi": null, "abstractUrl": "/proceedings-article/iccv/2001/00937522/12OmNzmclV4", "parentPublication": { "id": "proceedings/iccv/2001/1143/1", "title": "Computer Vision, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/1993/3880/0/00341163", "title": "Diffuse reflectance from rough surfaces", "doi": null, "abstractUrl": "/proceedings-article/cvpr/1993/00341163/12OmNzwpU3S", "parentPublication": { "id": "proceedings/cvpr/1993/3880/0", "title": "Proceedings of IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aiam/2021/1732/0/173200a031", "title": "Study on the surface microstructure preparation process parameters of PET based on ultrasonic embossing technology", "doi": null, "abstractUrl": "/proceedings-article/aiam/2021/173200a031/1BzTXWuESLC", "parentPublication": { "id": "proceedings/aiam/2021/1732/0", "title": "2021 3rd International Conference on Artificial Intelligence and Advanced Manufacture (AIAM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2021/01/08758881", "title": "A Microfacet-Based Model for Photometric Stereo with General Isotropic Reflectance", "doi": null, "abstractUrl": "/journal/tp/2021/01/08758881/1byhcnOIiys", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mark/1979/3181/0/08817307", "title": "Visual inspection of metal surfaces", "doi": null, "abstractUrl": 
"/proceedings-article/mark/1979/08817307/1cTIRdmHBrW", "parentPublication": { "id": "proceedings/mark/1979/3181/0", "title": "1979 International Workshop on Managing Requirements Knowledge", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2021/04/09487523", "title": "Realistic Rendering in &#x201C;Details&#x201D;", "doi": null, "abstractUrl": "/magazine/cg/2021/04/09487523/1vg3jD3T8NG", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": null, "article": { "id": "12OmNywfKA7", "doi": "10.1109/3DV.2014.60", "title": "Surface Detection Using Round Cut", "normalizedTitle": "Surface Detection Using Round Cut", "abstract": "We propose an iterative method for detecting closed surfaces in a volumetric data, where an optimal search is performed in a graph build upon a triangular mesh. Our approach is based on previous techniques for detecting an optimal terrain-like or tubular surface employing a regular grid. Unlike similar adaptations for triangle meshes, our method is capable of capturing complex geometries by iteratively refining the surface, where we obtain a high level of robustness by applying explicit mesh processing to intermediate results. Our method uses on-surface data support, but it also exploits data information about the region inside and outside the surface. This provides additional robustness to the algorithm. We demonstrate the capabilities of the approach by detecting surfaces of CT scanned objects.", "abstracts": [ { "abstractType": "Regular", "content": "We propose an iterative method for detecting closed surfaces in a volumetric data, where an optimal search is performed in a graph build upon a triangular mesh. Our approach is based on previous techniques for detecting an optimal terrain-like or tubular surface employing a regular grid. Unlike similar adaptations for triangle meshes, our method is capable of capturing complex geometries by iteratively refining the surface, where we obtain a high level of robustness by applying explicit mesh processing to intermediate results. Our method uses on-surface data support, but it also exploits data information about the region inside and outside the surface. This provides additional robustness to the algorithm. 
We demonstrate the capabilities of the approach by detecting surfaces of CT scanned objects.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose an iterative method for detecting closed surfaces in a volumetric data, where an optimal search is performed in a graph build upon a triangular mesh. Our approach is based on previous techniques for detecting an optimal terrain-like or tubular surface employing a regular grid. Unlike similar adaptations for triangle meshes, our method is capable of capturing complex geometries by iteratively refining the surface, where we obtain a high level of robustness by applying explicit mesh processing to intermediate results. Our method uses on-surface data support, but it also exploits data information about the region inside and outside the surface. This provides additional robustness to the algorithm. We demonstrate the capabilities of the approach by detecting surfaces of CT scanned objects.", "fno": "7000b082", "keywords": [ "Surface Treatment", "Rough Surfaces", "Surface Roughness", "Cost Function", "Iterative Methods", "Search Problems", "Smoothing Methods", "Object Detection", "Image Processing", "Image Analysis" ], "authors": [ { "affiliation": null, "fullName": "Vedrana A. Dahl", "givenName": "Vedrana A.", "surname": "Dahl", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Anders B. 
Dahl", "givenName": "Anders B.", "surname": "Dahl", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Rasmus Larsen", "givenName": "Rasmus", "surname": "Larsen", "__typename": "ArticleAuthorType" } ], "idPrefix": "3dv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-12-01T00:00:00", "pubType": "proceedings", "pages": "82-89", "year": "2014", "issn": null, "isbn": "978-1-4799-7000-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "7000b075", "articleId": "12OmNzsJ7B3", "__typename": "AdjacentArticleType" }, "next": { "fno": "7000b095", "articleId": "12OmNAkEU6z", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icdh/2014/4284/0/4284a140", "title": "A New Scheme of Interpolation Subdivision Surface by Using the Bezier Curve", "doi": null, "abstractUrl": "/proceedings-article/icdh/2014/4284a140/12OmNBkP3C5", "parentPublication": { "id": "proceedings/icdh/2014/4284/0", "title": "2014 5th International Conference on Digital Home (ICDH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ccats/2015/8211/0/8211a086", "title": "Surface Roughness Measurement Application Using Multi-frame Techniques", "doi": null, "abstractUrl": "/proceedings-article/ccats/2015/8211a086/12OmNvT2pgA", "parentPublication": { "id": "proceedings/ccats/2015/8211/0", "title": "2015 International Conference on Computer Application Technologies (CCATS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cim/1990/1966/0/00128122", "title": "Control of surface topographies formed during machining", "doi": null, "abstractUrl": "/proceedings-article/cim/1990/00128122/12OmNvzJG3L", "parentPublication": { "id": "proceedings/cim/1990/1966/0", "title": "1990 Rensselaer's 
Second International Conference on Computer Integrated Manufacturing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acsat/2015/0423/0/07478709", "title": "Optimization of Surface Quality of Mild Steel Machined by Wire EDM Using Simulated Annealing Algorithm", "doi": null, "abstractUrl": "/proceedings-article/acsat/2015/07478709/12OmNxuXcxt", "parentPublication": { "id": "proceedings/acsat/2015/0423/0", "title": "2015 4th International Conference on Advanced Computer Science Applications and Technologies (ACSAT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/is3c/2016/3071/0/3071a120", "title": "Road Surface Condition Distinction Method Using Reflection Intensities Obtained by Ultrasonic Sensor", "doi": null, "abstractUrl": "/proceedings-article/is3c/2016/3071a120/12OmNz2TCuq", "parentPublication": { "id": "proceedings/is3c/2016/3071/0", "title": "2016 International Symposium on Computer, Consumer and Control (IS3C)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdma/2012/4772/0/4772a218", "title": "Analysis of the Curved Surface on Surface Roughness Measurement Using Dichromatic Speckle Pattens", "doi": null, "abstractUrl": "/proceedings-article/icdma/2012/4772a218/12OmNz2kqrR", "parentPublication": { "id": "proceedings/icdma/2012/4772/0", "title": "2012 Third International Conference on Digital Manufacturing & Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2017/02/07737070", "title": "Multimodal Feature-Based Surface Material Classification", "doi": null, "abstractUrl": "/journal/th/2017/02/07737070/13rRUNvyakZ", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2015/02/07060666", "title": 
"Surface-Roughness-Based Virtual Textiles: Evaluation Using a Multi-Contactor Display", "doi": null, "abstractUrl": "/journal/th/2015/02/07060666/13rRUxly95L", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/1999/04/v0308", "title": "Partitioning 3D Surface Meshes Using Watershed Segmentation", "doi": null, "abstractUrl": "/journal/tg/1999/04/v0308/13rRUxly9dI", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2018/3788/0/08545715", "title": "Layered Surface Detection for Virtual Unrolling", "doi": null, "abstractUrl": "/proceedings-article/icpr/2018/08545715/17D45XlyDv8", "parentPublication": { "id": "proceedings/icpr/2018/3788/0", "title": "2018 24th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzSh1aC", "title": "2012 Third International Conference on Digital Manufacturing & Automation", "acronym": "icdma", "groupId": "1800272", "volume": "0", "displayVolume": "0", "year": "2012", "__typename": "ProceedingType" }, "article": { "id": "12OmNz2kqrR", "doi": "10.1109/ICDMA.2012.53", "title": "Analysis of the Curved Surface on Surface Roughness Measurement Using Dichromatic Speckle Pattens", "normalizedTitle": "Analysis of the Curved Surface on Surface Roughness Measurement Using Dichromatic Speckle Pattens", "abstract": "This paper focuses on the influence of curved surface on surface roughness measurement, the method of dichromatic speckle autocorrelation was used in measuring surface roughness, and curvature radius of surface is introduced. The rough surface and the dichromatic speckle patterns are simulated, and the effects of different radius of curved surface on speckle patterns and speckle elongation ratio are discussed. The results show that the factor of surface radius is valid in dichromatic speckle autocorrelation measurement. When the curvature radius is large, the speckle elongation is greater, the deviation of measured surface roughness is smaller. The curvature of rough surface will affect on measurement precision when we measure the roughness by the method of dichromatic speckle autocorrelation", "abstracts": [ { "abstractType": "Regular", "content": "This paper focuses on the influence of curved surface on surface roughness measurement, the method of dichromatic speckle autocorrelation was used in measuring surface roughness, and curvature radius of surface is introduced. The rough surface and the dichromatic speckle patterns are simulated, and the effects of different radius of curved surface on speckle patterns and speckle elongation ratio are discussed. The results show that the factor of surface radius is valid in dichromatic speckle autocorrelation measurement. 
When the curvature radius is large, the speckle elongation is greater, the deviation of measured surface roughness is smaller. The curvature of rough surface will affect on measurement precision when we measure the roughness by the method of dichromatic speckle autocorrelation", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper focuses on the influence of curved surface on surface roughness measurement, the method of dichromatic speckle autocorrelation was used in measuring surface roughness, and curvature radius of surface is introduced. The rough surface and the dichromatic speckle patterns are simulated, and the effects of different radius of curved surface on speckle patterns and speckle elongation ratio are discussed. The results show that the factor of surface radius is valid in dichromatic speckle autocorrelation measurement. When the curvature radius is large, the speckle elongation is greater, the deviation of measured surface roughness is smaller. The curvature of rough surface will affect on measurement precision when we measure the roughness by the method of dichromatic speckle autocorrelation", "fno": "4772a218", "keywords": [ "Speckle", "Rough Surfaces", "Surface Roughness", "Correlation", "Optical Surface Waves", "Surface Treatment", "Surface Waves", "Speckle Elongation", "Optical Measurement", "Surface Roughness", "Autocorrelation" ], "authors": [ { "affiliation": null, "fullName": "Zongheng Yuan", "givenName": "Zongheng", "surname": "Yuan", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Huang Jing", "givenName": "Huang", "surname": "Jing", "__typename": "ArticleAuthorType" } ], "idPrefix": "icdma", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2012-07-01T00:00:00", "pubType": "proceedings", "pages": "218-221", "year": "2012", "issn": null, "isbn": "978-1-4673-2217-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, 
"webExtras": [], "adjacentArticles": { "previous": { "fno": "4772a214", "articleId": "12OmNvyjGfN", "__typename": "AdjacentArticleType" }, "next": { "fno": "4772a222", "articleId": "12OmNBhpS2o", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icoip/2010/4252/1/4252a623", "title": "Influence of the Curved Surface on Surface Roughness Measurement Using Dichromatic Speckle Pattens", "doi": null, "abstractUrl": "/proceedings-article/icoip/2010/4252a623/12OmNAoUTj0", "parentPublication": { "id": "proceedings/icoip/2010/4252/2", "title": "Optoelectronics and Image Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wmso/2008/3484/0/3484a326", "title": "Study on the Surface Roughness and Surface Shape Simulation Based on STEP-NC Turning", "doi": null, "abstractUrl": "/proceedings-article/wmso/2008/3484a326/12OmNB9t6vx", "parentPublication": { "id": "proceedings/wmso/2008/3484/0", "title": "Modelling, Simulation and Optimization, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ccats/2015/8211/0/8211a086", "title": "Surface Roughness Measurement Application Using Multi-frame Techniques", "doi": null, "abstractUrl": "/proceedings-article/ccats/2015/8211a086/12OmNvT2pgA", "parentPublication": { "id": "proceedings/ccats/2015/8211/0", "title": "2015 International Conference on Computer Application Technologies (CCATS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccet/2009/3521/2/3521b299", "title": "A Study of Influence Factors Affecting to Surface Roughness in Stainless Steel Turning", "doi": null, "abstractUrl": "/proceedings-article/iccet/2009/3521b299/12OmNwbLVlg", "parentPublication": { "id": "proceedings/iccet/2009/3521/1", "title": "Computer Engineering and Technology, 
International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2012/1226/0/005P1A05", "title": "Laser speckle photography for surface tampering detection", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2012/005P1A05/12OmNx6PiE0", "parentPublication": { "id": "proceedings/cvpr/2012/1226/0", "title": "2012 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ihmsc/2010/4151/2/4151b298", "title": "Surface Roughness Measure Based on Average Texture Cycle", "doi": null, "abstractUrl": "/proceedings-article/ihmsc/2010/4151b298/12OmNxE2mME", "parentPublication": { "id": "proceedings/ihmsc/2010/4151/2", "title": "Intelligent Human-Machine Systems and Cybernetics, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccnea/2017/3981/0/3981a138", "title": "Research on Fault Diagnosis Technology of CNC Machine Tool Based on Machining Surface Roughness", "doi": null, "abstractUrl": "/proceedings-article/iccnea/2017/3981a138/12OmNyen1vo", "parentPublication": { "id": "proceedings/iccnea/2017/3981/0", "title": "2017 International Conference on Computer Network, Electronic and Automation (ICCNEA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/date/2009/3781/0/05090842", "title": "New simulation methodology of 3D surface roughness loss for interconnects modeling", "doi": null, "abstractUrl": "/proceedings-article/date/2009/05090842/12OmNzC5T1P", "parentPublication": { "id": "proceedings/date/2009/3781/0", "title": "2009 Design, Automation &amp; Test in Europe Conference &amp; Exhibition (DATE'09)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icnc/2009/3736/1/3736a521", "title": "Intelligent Modeling and 
Predicting Surface Roughness in End Milling", "doi": null, "abstractUrl": "/proceedings-article/icnc/2009/3736a521/12OmNzZEArR", "parentPublication": { "id": "proceedings/icnc/2009/3736/4", "title": "2009 Fifth International Conference on Natural Computation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2020/7081/0/708100a012", "title": "Analysis of Static Characteristics of Hydrodynamic Bearing with Different Surface Roughness", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2020/708100a012/1iERIawDD5C", "parentPublication": { "id": "proceedings/icmtma/2020/7081/0", "title": "2020 12th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKirt", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "17D45WrVg22", "doi": "10.1109/CVPR.2018.00308", "title": "Deep Marching Cubes: Learning Explicit Surface Representations", "normalizedTitle": "Deep Marching Cubes: Learning Explicit Surface Representations", "abstract": "Existing learning based solutions to 3D surface prediction cannot be trained end-to-end as they operate on intermediate representations (e.g., TSDF) from which 3D surface meshes must be extracted in a post-processing step (e.g., via the marching cubes algorithm). In this paper, we investigate the problem of end-to-end 3D surface prediction. We first demonstrate that the marching cubes algorithm is not differentiable and propose an alternative differentiable formulation which we insert as a final layer into a 3D convolutional neural network. We further propose a set of loss functions which allow for training our model with sparse point supervision. Our experiments demonstrate that the model allows for predicting sub-voxel accurate 3D shapes of arbitrary topology. Additionally, it learns to complete shapes and to separate an object's inside from its outside even in the presence of sparse and incomplete ground truth. We investigate the benefits of our approach on the task of inferring shapes from 3D point clouds. Our model is flexible and can be combined with a variety of shape encoder and shape inference techniques.", "abstracts": [ { "abstractType": "Regular", "content": "Existing learning based solutions to 3D surface prediction cannot be trained end-to-end as they operate on intermediate representations (e.g., TSDF) from which 3D surface meshes must be extracted in a post-processing step (e.g., via the marching cubes algorithm). 
In this paper, we investigate the problem of end-to-end 3D surface prediction. We first demonstrate that the marching cubes algorithm is not differentiable and propose an alternative differentiable formulation which we insert as a final layer into a 3D convolutional neural network. We further propose a set of loss functions which allow for training our model with sparse point supervision. Our experiments demonstrate that the model allows for predicting sub-voxel accurate 3D shapes of arbitrary topology. Additionally, it learns to complete shapes and to separate an object's inside from its outside even in the presence of sparse and incomplete ground truth. We investigate the benefits of our approach on the task of inferring shapes from 3D point clouds. Our model is flexible and can be combined with a variety of shape encoder and shape inference techniques.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Existing learning based solutions to 3D surface prediction cannot be trained end-to-end as they operate on intermediate representations (e.g., TSDF) from which 3D surface meshes must be extracted in a post-processing step (e.g., via the marching cubes algorithm). In this paper, we investigate the problem of end-to-end 3D surface prediction. We first demonstrate that the marching cubes algorithm is not differentiable and propose an alternative differentiable formulation which we insert as a final layer into a 3D convolutional neural network. We further propose a set of loss functions which allow for training our model with sparse point supervision. Our experiments demonstrate that the model allows for predicting sub-voxel accurate 3D shapes of arbitrary topology. Additionally, it learns to complete shapes and to separate an object's inside from its outside even in the presence of sparse and incomplete ground truth. We investigate the benefits of our approach on the task of inferring shapes from 3D point clouds. 
Our model is flexible and can be combined with a variety of shape encoder and shape inference techniques.", "fno": "642000c916", "keywords": [ "Computer Graphics", "Convolution", "Feedforward Neural Nets", "Image Representation", "Learning Artificial Intelligence", "Mesh Generation", "Solid Modelling", "Stereo Image Processing", "Explicit Surface Representations", "Intermediate Representations", "3 D Surface Meshes", "Post Processing Step", "Marching Cubes Algorithm", "End To End 3 D Surface Prediction", "Alternative Differentiable Formulation", "3 D Convolutional Neural Network", "Sparse Point Supervision", "Sub Voxel Accurate 3 D", "3 D Point Clouds", "Shape Encoder", "Shape Inference Techniques", "Three Dimensional Displays", "Shape", "Surface Reconstruction", "Topology", "Face", "Surface Treatment", "Solid Modeling" ], "authors": [ { "affiliation": null, "fullName": "Yiyi Liao", "givenName": "Yiyi", "surname": "Liao", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Simon Donné", "givenName": "Simon", "surname": "Donné", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Andreas Geiger", "givenName": "Andreas", "surname": "Geiger", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-06-01T00:00:00", "pubType": "proceedings", "pages": "2916-2925", "year": "2018", "issn": null, "isbn": "978-1-5386-6420-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "642000c906", "articleId": "17D45WK5AlX", "__typename": "AdjacentArticleType" }, "next": { "fno": "642000c926", "articleId": "17D45XacGjp", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/skg/2009/3810/0/3810a458", "title": "Iso-surface Extraction and Optimization Method Based on Marching Cubes", "doi": 
null, "abstractUrl": "/proceedings-article/skg/2009/3810a458/12OmNBBQZrl", "parentPublication": { "id": "proceedings/skg/2009/3810/0", "title": "Semantics, Knowledge and Grid, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/smi/2004/2075/0/20750297", "title": "Contouring Medial Surface of Thin Plate Structure Using Local Marching Cubes", "doi": null, "abstractUrl": "/proceedings-article/smi/2004/20750297/12OmNx38vVX", "parentPublication": { "id": "proceedings/smi/2004/2075/0", "title": "Proceedings. Shape Modeling International 2004", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visual/1994/6627/0/00346308", "title": "Discretized Marching Cubes", "doi": null, "abstractUrl": "/proceedings-article/visual/1994/00346308/12OmNxRnvNC", "parentPublication": { "id": "proceedings/visual/1994/6627/0", "title": "Proceedings Visualization '94", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/kam/2011/1788/0/06137646", "title": "Algorithm of Marching Cubes", "doi": null, "abstractUrl": "/proceedings-article/kam/2011/06137646/12OmNyOHG7L", "parentPublication": { "id": "proceedings/kam/2011/1788/0", "title": "2011 Fourth International Symposium on Knowledge Acquisition and Modeling", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgi/2001/1007/0/10070306", "title": "A Marching Voxels Method for Surface Rendering of Volume Data", "doi": null, "abstractUrl": "/proceedings-article/cgi/2001/10070306/12OmNzdoN6A", "parentPublication": { "id": "proceedings/cgi/2001/1007/0", "title": "Proceedings. 
Computer Graphics International 2001", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2009/01/ttg2009010150", "title": "Edge Transformations for Improving Mesh Quality of Marching Cubes", "doi": null, "abstractUrl": "/journal/tg/2009/01/ttg2009010150/13rRUx0gezQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2003/03/v0283", "title": "On Marching Cubes", "doi": null, "abstractUrl": "/journal/tg/2003/03/v0283/13rRUyYjK58", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/03/08320335", "title": "Surface Reconstruction from Discrete Indicator Functions", "doi": null, "abstractUrl": "/journal/tg/2019/03/08320335/17D45WK5Arb", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2020/02/09020242", "title": "History of the Marching Cubes Algorithm", "doi": null, "abstractUrl": "/magazine/cg/2020/02/09020242/1hS2QvK5ITC", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/12/09650726", "title": "Learning and Meshing From Deep Implicit Surface Networks Using an Efficient Implementation of Analytic Marching", "doi": null, "abstractUrl": "/journal/tp/2022/12/09650726/1zkoUjcxaDe", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAYXWAF", "title": "2016 IEEE Virtual Reality (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNAYoKnD", "doi": "10.1109/VR.2016.7504719", "title": "Acting together: Joint pedestrian road crossing in an immersive virtual environment", "normalizedTitle": "Acting together: Joint pedestrian road crossing in an immersive virtual environment", "abstract": "We investigated how two people jointly coordinate their decisions and actions in a co-occupied, large-screen virtual environment. The task for participants was to physically cross a virtual road with continuous traffic without getting hit by a car. Participants performed this task either alone or with another person (see Fig.1). We found that pairs often crossed the same gap together and closely synchronized their movements when crossing. Pairs also chose larger gaps than individuals to accommodate the extra time needed to cross through gaps together. These results reveal how two people interact and coordinate their behaviors in performing whole-body, joint motions. This study also provides a foundation for future studies examining joint actions in shared VEs where participants are represented by graphic avatars.", "abstracts": [ { "abstractType": "Regular", "content": "We investigated how two people jointly coordinate their decisions and actions in a co-occupied, large-screen virtual environment. The task for participants was to physically cross a virtual road with continuous traffic without getting hit by a car. Participants performed this task either alone or with another person (see Fig.1). We found that pairs often crossed the same gap together and closely synchronized their movements when crossing. Pairs also chose larger gaps than individuals to accommodate the extra time needed to cross through gaps together. 
These results reveal how two people interact and coordinate their behaviors in performing whole-body, joint motions. This study also provides a foundation for future studies examining joint actions in shared VEs where participants are represented by graphic avatars.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We investigated how two people jointly coordinate their decisions and actions in a co-occupied, large-screen virtual environment. The task for participants was to physically cross a virtual road with continuous traffic without getting hit by a car. Participants performed this task either alone or with another person (see Fig.1). We found that pairs often crossed the same gap together and closely synchronized their movements when crossing. Pairs also chose larger gaps than individuals to accommodate the extra time needed to cross through gaps together. These results reveal how two people interact and coordinate their behaviors in performing whole-body, joint motions. This study also provides a foundation for future studies examining joint actions in shared VEs where participants are represented by graphic avatars.", "fno": "07504719", "keywords": [ "Roads", "Virtual Environments", "Electronic Mail", "Synchronization", "Avatars", "Glass", "Co Occupied Virtual Environments", "Immersive Virtual Environments", "Joint Action", "Pedestrian Road Crossing" ], "authors": [ { "affiliation": "The University of Iowa", "fullName": "Yuanyuan Jiang", "givenName": "Yuanyuan", "surname": "Jiang", "__typename": "ArticleAuthorType" }, { "affiliation": "The University of Iowa", "fullName": "Pooya Rahimian", "givenName": "Pooya", "surname": "Rahimian", "__typename": "ArticleAuthorType" }, { "affiliation": "The University of Iowa", "fullName": "Elizabeth E. O'Neal", "givenName": "Elizabeth E.", "surname": "O'Neal", "__typename": "ArticleAuthorType" }, { "affiliation": "The University of Iowa", "fullName": "Jodie M. 
Plumert", "givenName": "Jodie M.", "surname": "Plumert", "__typename": "ArticleAuthorType" }, { "affiliation": "The University of Iowa", "fullName": "Junghum Paul Yon", "givenName": "Junghum Paul", "surname": "Yon", "__typename": "ArticleAuthorType" }, { "affiliation": "The University of Iowa", "fullName": "Joseph K. Kearney", "givenName": "Joseph K.", "surname": "Kearney", "__typename": "ArticleAuthorType" }, { "affiliation": "The University of Iowa", "fullName": "Luke Franzen", "givenName": "Luke", "surname": "Franzen", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2016-03-01T00:00:00", "pubType": "proceedings", "pages": "193-194", "year": "2016", "issn": "2375-5334", "isbn": "978-1-5090-0836-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07504718", "articleId": "12OmNzxPTKw", "__typename": "AdjacentArticleType" }, "next": { "fno": "07504720", "articleId": "12OmNBqdr66", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ia/2016/0834/0/07932377", "title": "Simulation exploration through immersive parallel planes", "doi": null, "abstractUrl": "/proceedings-article/ia/2016/07932377/12OmNrYlmNB", "parentPublication": { "id": "proceedings/ia/2016/0834/0", "title": "2016 Workshop on Immersive Analytics (IA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504729", "title": "Influence by others' opinions: Social pressure from agents in immersive virtual environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504729/12OmNs5rkT8", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/vs-games/2017/5812/0/08056613", "title": "An immersive virtual environment for collaborative geovisualization", "doi": null, "abstractUrl": "/proceedings-article/vs-games/2017/08056613/12OmNvCi45l", "parentPublication": { "id": "proceedings/vs-games/2017/5812/0", "title": "2017 9th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504689", "title": "The impact of a self-avatar on cognitive load in immersive virtual reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504689/12OmNviHKla", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892241", "title": "Prism aftereffects for throwing with a self-avatar in an immersive virtual environment", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892241/12OmNxy4N0w", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icalt/2015/7334/0/7334a423", "title": "Effectiveness of an Immersive Virtual Environment (CAVE) for Teaching Pedestrian Crossing to Children with PDD-NOS", "doi": null, "abstractUrl": "/proceedings-article/icalt/2015/7334a423/12OmNznkK1X", "parentPublication": { "id": "proceedings/icalt/2015/7334/0", "title": "2015 IEEE 15th International Conference on Advanced Learning Technologies (ICALT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/10/08438908", "title": "Joint Action in a Virtual Environment: Crossing Roads with Risky vs. 
Safe Human and Agent Partners", "doi": null, "abstractUrl": "/journal/tg/2019/10/08438908/13rRUyfKIHV", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2012/06/mcg2012060036", "title": "Visual Realism Enhances Realistic Response in an Immersive Virtual Environment - Part 2", "doi": null, "abstractUrl": "/magazine/cg/2012/06/mcg2012060036/13rRUygBw23", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08643340", "title": "Interpersonal Affordances and Social Dynamics in Collaborative Immersive Virtual Environments: Passing Together Through Apertures", "doi": null, "abstractUrl": "/journal/tg/2019/05/08643340/18K0nmkwheo", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797967", "title": "Acting Together, Acting Stronger? Interference between Participants during Face-to-face Cooperative Interception Task", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797967/1cJ134ZOI9y", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNCbU3aL", "title": "Web Intelligence and Intelligent Agent Technology, International Conference on", "acronym": "wi-iatw", "groupId": "1001412", "volume": "0", "displayVolume": "0", "year": "2007", "__typename": "ProceedingType" }, "article": { "id": "12OmNBtCCCs", "doi": "10.1109/WI-IATW.2007.15", "title": "Prediction of Partners' Behaviors in Agent Negotiation under Open and Dynamic Environments", "normalizedTitle": "Prediction of Partners' Behaviors in Agent Negotiation under Open and Dynamic Environments", "abstract": "Prediction of partners' behaviors in negotiation has been an active research direction in recent years in the area of multi-agent and agent system. So by employing the prediction results, agents can modify their own negotiation strategies in order to achieve an agreement much quicker or to look after much higher benefits. Even though some of prediction strategies have been proposed by researchers, most of them are based on machine learning mechanisms which require a training process in advance. However, in most circumstances, the machine learning approaches might not work well for some kinds of agents whose behaviors are excluded in the training data. In order to address this issue, we propose three regression functions to predict agents' behaviors in this paper, which are linear, power and quadratic regression functions. The experimental results illustrate that the proposed functions can estimate partners' potential behaviors successfully and efficiently in different circumstances.", "abstracts": [ { "abstractType": "Regular", "content": "Prediction of partners' behaviors in negotiation has been an active research direction in recent years in the area of multi-agent and agent system. So by employing the prediction results, agents can modify their own negotiation strategies in order to achieve an agreement much quicker or to look after much higher benefits. 
Even though some of prediction strategies have been proposed by researchers, most of them are based on machine learning mechanisms which require a training process in advance. However, in most circumstances, the machine learning approaches might not work well for some kinds of agents whose behaviors are excluded in the training data. In order to address this issue, we propose three regression functions to predict agents' behaviors in this paper, which are linear, power and quadratic regression functions. The experimental results illustrate that the proposed functions can estimate partners' potential behaviors successfully and efficiently in different circumstances.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Prediction of partners' behaviors in negotiation has been an active research direction in recent years in the area of multi-agent and agent system. So by employing the prediction results, agents can modify their own negotiation strategies in order to achieve an agreement much quicker or to look after much higher benefits. Even though some of prediction strategies have been proposed by researchers, most of them are based on machine learning mechanisms which require a training process in advance. However, in most circumstances, the machine learning approaches might not work well for some kinds of agents whose behaviors are excluded in the training data. In order to address this issue, we propose three regression functions to predict agents' behaviors in this paper, which are linear, power and quadratic regression functions. 
The experimental results illustrate that the proposed functions can estimate partners' potential behaviors successfully and efficiently in different circumstances.", "fno": "3028a379", "keywords": [ "Prediction Partner Selection Negotiation Multi Agent Systems" ], "authors": [ { "affiliation": null, "fullName": "Fenghui Ren", "givenName": "Fenghui", "surname": "Ren", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Minjie Zhang", "givenName": "Minjie", "surname": "Zhang", "__typename": "ArticleAuthorType" } ], "idPrefix": "wi-iatw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2007-11-01T00:00:00", "pubType": "proceedings", "pages": "379-382", "year": "2007", "issn": null, "isbn": "0-7695-3028-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3028a368", "articleId": "12OmNxR5UOH", "__typename": "AdjacentArticleType" }, "next": { "fno": "3028a387", "articleId": "12OmNyUWR3e", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icic/2009/3634/1/3634a228", "title": "A Negotiation Model of Supply Chain Based on Multi-agent", "doi": null, "abstractUrl": "/proceedings-article/icic/2009/3634a228/12OmNB0nWaf", "parentPublication": { "id": "icic/2009/3634/1", "title": "2009 Second International Conference on Information and Computing Science", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iitaw/2008/3505/0/3505a133", "title": "Multi-agent Negotiation Model Based on RBF Neural Network Learning Mechanism", "doi": null, "abstractUrl": "/proceedings-article/iitaw/2008/3505a133/12OmNB8Cj1L", "parentPublication": { "id": "proceedings/iitaw/2008/3505/0", "title": "2008 International Symposium on Intelligent Information Technology Application Workshops", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/synasc/2007/3078/0/30780254", "title": "Adaptive Negotiation Based on Rewards and Regret in a Multi-Agent Environment", "doi": null, "abstractUrl": "/proceedings-article/synasc/2007/30780254/12OmNCbkQCC", "parentPublication": { "id": "proceedings/synasc/2007/3078/0", "title": "Ninth International Symposium on Symbolic and Numeric Algorithms for Scientific Computing (SYNASC 2007)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ncis/2011/4355/2/4355b024", "title": "A Survey of Agent Based Automated Negotiation", "doi": null, "abstractUrl": "/proceedings-article/ncis/2011/4355b024/12OmNrHB1TU", "parentPublication": { "id": "proceedings/ncis/2011/4355/2", "title": "Network Computing and Information Security, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itng/2010/3984/0/3984a448", "title": "A Negotiation Protocol to Support Agent Argumentation and Ontology Interoperability in MAS-Based Virtual Enterprises", "doi": null, "abstractUrl": "/proceedings-article/itng/2010/3984a448/12OmNroij9P", "parentPublication": { "id": "proceedings/itng/2010/3984/0", "title": "Information Technology: New Generations, Third International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icis/2007/2841/0/284100457", "title": "A Fuzzy-Based Approach for Partner Selection in Multi-Agent Systems", "doi": null, "abstractUrl": "/proceedings-article/icis/2007/284100457/12OmNvStcAW", "parentPublication": { "id": "proceedings/icis/2007/2841/0", "title": "2007 International Conference on Computer and Information Science", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cisim/2008/3184/0/3184a127", "title": "Negotiation Behaviors Based on Artificial Intelligence and Social and Cognitive 
Human-Agent Interaction", "doi": null, "abstractUrl": "/proceedings-article/cisim/2008/3184a127/12OmNyugyYt", "parentPublication": { "id": "proceedings/cisim/2008/3184/0", "title": "7th Computer Information Systems and Industrial Management Applications (CISIM 2008)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iat/2005/2416/0/24160619", "title": "Agent-Based Negotiation Between Partners in Loose Inter-Organizational Workflow", "doi": null, "abstractUrl": "/proceedings-article/iat/2005/24160619/12OmNzZmZkT", "parentPublication": { "id": "proceedings/iat/2005/2416/0", "title": "Proceedings. The 2005 IEEE/WIC/ACM International Conference on Intelligent Agent Technology", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNqBtiEJ", "title": "2008 International Workshop on Advanced Information Systems for Enterprises", "acronym": "iwaise", "groupId": "1001835", "volume": "0", "displayVolume": "0", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNCcKQJL", "doi": "10.1109/IWAISE.2008.14", "title": "An Agent-Based Architecture for a Cooperation Information System Supporting the Homecare", "normalizedTitle": "An Agent-Based Architecture for a Cooperation Information System Supporting the Homecare", "abstract": "Nowadays, the organization of homecare arouses an increasing interest. By organization, we mean particularly the choice of the partners, distribution’s problems, tasks’ coordination, and the resolution of all participants’ resources-conflicts. The quality of the homecare relies on the competences, the qualifications of the participating partners and their organization. It is evident that agent's paradigm is the most suitable among other paradigms because it is capable of taking into account all these aspects. In this paper we would like to present an agent-based architecture for a cooperation information system that supports the homecare and contains all the necessary constituents for its functioning", "abstracts": [ { "abstractType": "Regular", "content": "Nowadays, the organization of homecare arouses an increasing interest. By organization, we mean particularly the choice of the partners, distribution’s problems, tasks’ coordination, and the resolution of all participants’ resources-conflicts. The quality of the homecare relies on the competences, the qualifications of the participating partners and their organization. It is evident that agent's paradigm is the most suitable among other paradigms because it is capable of taking into account all these aspects. 
In this paper we would like to present an agent-based architecture for a cooperation information system that supports the homecare and contains all the necessary constituents for its functioning", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Nowadays, the organization of homecare arouses an increasing interest. By organization, we mean particularly the choice of the partners, distribution’s problems, tasks’ coordination, and the resolution of all participants’ resources-conflicts. The quality of the homecare relies on the competences, the qualifications of the participating partners and their organization. It is evident that agent's paradigm is the most suitable among other paradigms because it is capable of taking into account all these aspects. In this paper we would like to present an agent-based architecture for a cooperation information system that supports the homecare and contains all the necessary constituents for its functioning", "fno": "3116a063", "keywords": [ "Homecare", "Cooperative Information Systems", "Multi Agent Systems", "Coordination", "Interoperability" ], "authors": [ { "affiliation": null, "fullName": "Karim Zarour", "givenName": "Karim", "surname": "Zarour", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Nacereddine Zarour", "givenName": "Nacereddine", "surname": "Zarour", "__typename": "ArticleAuthorType" } ], "idPrefix": "iwaise", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-04-01T00:00:00", "pubType": "proceedings", "pages": "63-69", "year": "2008", "issn": null, "isbn": "978-0-7695-3116-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3116a053", "articleId": "12OmNBIFmwi", "__typename": "AdjacentArticleType" }, "next": { "fno": "3116a070", "articleId": "12OmNzvQHTX", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, 
"recommendedArticles": [ { "id": "proceedings/icsssm/2005/8971/2/01500234", "title": "Analysis and appraisal of strategies and mechanisms for outsourcing homecare", "doi": null, "abstractUrl": "/proceedings-article/icsssm/2005/01500234/12OmNApcuyd", "parentPublication": { "id": "proceedings/icsssm/2005/8971/2", "title": "International Conference on Services Systems and Services Management", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/soca/2011/0318/0/06166222", "title": "Dynamic homecare service provisioning architecture", "doi": null, "abstractUrl": "/proceedings-article/soca/2011/06166222/12OmNBSjIXO", "parentPublication": { "id": "proceedings/soca/2011/0318/0", "title": "2011 IEEE International Conference on Service-Oriented Computing and Applications (SOCA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aici/2009/3816/4/3816d418", "title": "Research on Coordination Platform for Distributed Manufacturers Using Multi-agent", "doi": null, "abstractUrl": "/proceedings-article/aici/2009/3816d418/12OmNBgQFN9", "parentPublication": { "id": "proceedings/aici/2009/3816/4", "title": "2009 International Conference on Artificial Intelligence and Computational Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/kdex/1997/8230/0/82300019", "title": "An Agent-Based Approach for Intelligent and Cooperative Systems", "doi": null, "abstractUrl": "/proceedings-article/kdex/1997/82300019/12OmNC8MsH5", "parentPublication": { "id": "proceedings/kdex/1997/8230/0", "title": "Knowledge and Data Exchange, IEEE Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cit/2011/4388/0/4388a467", "title": "Flexible Homecare Application Personalization and Integration Using Pattern-Based Service Tailoring: Supporting Independent Living of Elderly with IT", "doi": 
null, "abstractUrl": "/proceedings-article/cit/2011/4388a467/12OmNqIzh8h", "parentPublication": { "id": "proceedings/cit/2011/4388/0", "title": "Computer and Information Technology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/broadcom/2008/3453/0/3453a215", "title": "Applications of Cooperative WSN in Homecare Systems", "doi": null, "abstractUrl": "/proceedings-article/broadcom/2008/3453a215/12OmNxQOjIu", "parentPublication": { "id": "proceedings/broadcom/2008/3453/0", "title": "Broadband Communications, Information Technology &amp; Biomedical Applications, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wi-iat/2008/3496/2/3496b087", "title": "Agent Decision Making for Dynamic Selection of Coordination Mechanisms", "doi": null, "abstractUrl": "/proceedings-article/wi-iat/2008/3496b087/12OmNxdDFGC", "parentPublication": { "id": "proceedings/wi-iat/2008/3496/2", "title": "Web Intelligence and Intelligent Agent Technology, IEEE/WIC/ACM International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cisis/2010/3967/0/3967a526", "title": "A Pattern-Based Coordination and Test Framework for Multi-Agent Simulation of Production Automation Systems", "doi": null, "abstractUrl": "/proceedings-article/cisis/2010/3967a526/12OmNyjccAj", "parentPublication": { "id": "proceedings/cisis/2010/3967/0", "title": "2010 International Conference on Complex, Intelligent and Software Intensive Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cason/2010/4202/0/4202a175", "title": "A Web-based Light Electric Vehicle for Homecare Use - A Pilot Study", "doi": null, "abstractUrl": "/proceedings-article/cason/2010/4202a175/12OmNzlly0j", "parentPublication": { "id": "proceedings/cason/2010/4202/0", 
"title": "Computational Aspects of Social Networks, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sitis/2019/5686/0/568600a627", "title": "Agent-Based Approach of Multi-structures Homecare Planning Problem", "doi": null, "abstractUrl": "/proceedings-article/sitis/2019/568600a627/1j9xF9tkNuE", "parentPublication": { "id": "proceedings/sitis/2019/5686/0", "title": "2019 15th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBQ2VPW", "title": "Web Intelligence and Intelligent Agent Technology, IEEE/WIC/ACM International Conference on", "acronym": "wi-iat", "groupId": "1001411", "volume": "3", "displayVolume": "3", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNqJ8tkD", "doi": "10.1109/WI-IAT.2009.300", "title": "Cognitive Modeling of Virtual Autonomous Intelligent Agents Integrating Human Factors", "normalizedTitle": "Cognitive Modeling of Virtual Autonomous Intelligent Agents Integrating Human Factors", "abstract": "We introduce a cognitive architecture combining human factors, human cognitive modeling, decision, perception and action selection based on hierarchical task networks. We use a cognitive model that comes from phycology science to define the control mode of a virtual agent. This control mode influences the agent decisions as well as its perception, its knowledge, or its goals. It depends on its physical and cognitive states and on its personality. We determine which factors influence the agent control mode and use them to generate various behaviors according to the situational constraints. We propose a new approach for modeling the environment and new algorithms to enable our virtual characters to evolve in the environment dynamically with a certain level of credibility.", "abstracts": [ { "abstractType": "Regular", "content": "We introduce a cognitive architecture combining human factors, human cognitive modeling, decision, perception and action selection based on hierarchical task networks. We use a cognitive model that comes from phycology science to define the control mode of a virtual agent. This control mode influences the agent decisions as well as its perception, its knowledge, or its goals. It depends on its physical and cognitive states and on its personality. We determine which factors influence the agent control mode and use them to generate various behaviors according to the situational constraints. 
We propose a new approach for modeling the environment and new algorithms to enable our virtual characters to evolve in the environment dynamically with a certain level of credibility.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We introduce a cognitive architecture combining human factors, human cognitive modeling, decision, perception and action selection based on hierarchical task networks. We use a cognitive model that comes from phycology science to define the control mode of a virtual agent. This control mode influences the agent decisions as well as its perception, its knowledge, or its goals. It depends on its physical and cognitive states and on its personality. We determine which factors influence the agent control mode and use them to generate various behaviors according to the situational constraints. We propose a new approach for modeling the environment and new algorithms to enable our virtual characters to evolve in the environment dynamically with a certain level of credibility.", "fno": "3801c353", "keywords": [ "Cognitive Modeling", "Multi Agent Systems", "Virtual Reality", "Training Systems", "Human Factors", "Cognitive Architecture" ], "authors": [ { "affiliation": null, "fullName": "Lydie Edward", "givenName": "Lydie", "surname": "Edward", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Domitile Lourdeaux", "givenName": "Domitile", "surname": "Lourdeaux", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jean-Paul Barthès", "givenName": "Jean-Paul", "surname": "Barthès", "__typename": "ArticleAuthorType" } ], "idPrefix": "wi-iat", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-09-01T00:00:00", "pubType": "proceedings", "pages": "353-356", "year": "2009", "issn": null, "isbn": "978-0-7695-3801-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { 
"fno": "3801c349", "articleId": "12OmNvk7K0B", "__typename": "AdjacentArticleType" }, "next": { "fno": "3801c357", "articleId": "12OmNvIfDQt", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/wi-iat/2011/4513/2/4513b409", "title": "Monitoring Gestational Diabetes Mellitus with Cognitive Agents and Agent Environments", "doi": null, "abstractUrl": "/proceedings-article/wi-iat/2011/4513b409/12OmNAJDBwA", "parentPublication": { "id": "proceedings/wi-iat/2011/4513/2", "title": "Web Intelligence and Intelligent Agent Technology, IEEE/WIC/ACM International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/compsac/2008/3262/0/3262a547", "title": "Towards Engineering Ontologies for Cognitive Profiling of Agents on the Semantic Web", "doi": null, "abstractUrl": "/proceedings-article/compsac/2008/3262a547/12OmNBlFQW8", "parentPublication": { "id": "proceedings/compsac/2008/3262/0", "title": "2008 32nd Annual IEEE International Computer Software and Applications Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iat/2006/2748/0/04052929", "title": "A Cognitive Model for Visual Attention and Its Application", "doi": null, "abstractUrl": "/proceedings-article/iat/2006/04052929/12OmNqI04R5", "parentPublication": { "id": "proceedings/iat/2006/2748/0", "title": "2006 IEEE/WIC/ACM International Conference on Intelligent Agent Technology", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wi-iat/2008/3496/2/3496b330", "title": "Relating Cognitive Process Models to Behavioural Models of Agents", "doi": null, "abstractUrl": "/proceedings-article/wi-iat/2008/3496b330/12OmNrAMEMk", "parentPublication": { "id": "proceedings/wi-iat/2008/3496/2", "title": "Web Intelligence and Intelligent Agent Technology, IEEE/WIC/ACM International 
Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892349", "title": "Cognitive psychology and human factors engineering of virtual reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892349/12OmNvT2p0e", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wi-iat/2012/4880/2/4880b409", "title": "A Multi Scale Cognitive Architecture to Account for the Adaptive and Reflective Nature of Behaviour", "doi": null, "abstractUrl": "/proceedings-article/wi-iat/2012/4880b409/12OmNviZlLE", "parentPublication": { "id": "proceedings/wi-iat/2012/4880/2", "title": "Web Intelligence and Intelligent Agent Technology, IEEE/WIC/ACM International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icci/2006/0475/1/04216382", "title": "Towards Cognitive Machines: Multiscale Measures and Analysis", "doi": null, "abstractUrl": "/proceedings-article/icci/2006/04216382/12OmNxxNbVH", "parentPublication": { "id": "proceedings/icci/2006/0475/1", "title": "Cognitive Informatics, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isads/1997/7783/0/77830025", "title": "A Hybrid Agent Model: a Reactive and Cognitive Behavior", "doi": null, "abstractUrl": "/proceedings-article/isads/1997/77830025/12OmNyUFfYx", "parentPublication": { "id": "proceedings/isads/1997/7783/0", "title": "Proceedings of the Third International Symposium on Autonomous Decentralized Systems. 
ISADS 97", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/kmn/2002/1778/0/17780171", "title": "Dynamic Semantics of Cognitive Agent Language", "doi": null, "abstractUrl": "/proceedings-article/kmn/2002/17780171/12OmNyv7mm2", "parentPublication": { "id": "proceedings/kmn/2002/1778/0", "title": "Proceedings IEEE Workshop on Knowledge Media Networking", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/2019/05/08713837", "title": "Cognitive Computing Systems: Their Potential and the Future", "doi": null, "abstractUrl": "/magazine/co/2019/05/08713837/1a31jv3hDP2", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1KmF7rVz6Y8", "title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "acronym": "aivr", "groupId": "1830004", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1KmFdlbCJji", "doi": "10.1109/AIVR56993.2022.00017", "title": "Empathizing with virtual agents: the effect of personification and general empathic tendencies", "normalizedTitle": "Empathizing with virtual agents: the effect of personification and general empathic tendencies", "abstract": "For interactions to be natural, virtual agents should understand humans&#x2019; emotions, and humans should have emotional reactions towards them. In human-to-human interaction, this is achieved through empathic processes between individuals. So, improving empathic responses towards virtual agents represents a crucial step in improving human-virtual agent interactions. This study aims to identify whether the presence of a personification story and individual differences in the ability to empathize predict the empathic response towards a virtual agent. Furthermore, it investigates the effect of previous experience with virtual agents and gender on empathy towards the virtual agent. In an experiment, participants witnessed a virtual reality scene in which a virtual agent experienced sadness. Half of the participants were previously presented with a personification story about the virtual agent, and all completed a self-report questionnaire about empathy and a post-experiment survey about their empathic response towards the virtual agent. Results showed that individual differences in empathy significantly predict the ability to empathize with the virtual agent: people who are naturally predisposed to feel more empathy towards others tend to be more empathic towards the virtual agent. The personification story, previous experience and participants&#x2019; gender did not affect the empathic response. 
Implications and future direction for the design of virtual agents are discussed.", "abstracts": [ { "abstractType": "Regular", "content": "For interactions to be natural, virtual agents should understand humans&#x2019; emotions, and humans should have emotional reactions towards them. In human-to-human interaction, this is achieved through empathic processes between individuals. So, improving empathic responses towards virtual agents represents a crucial step in improving human-virtual agent interactions. This study aims to identify whether the presence of a personification story and individual differences in the ability to empathize predict the empathic response towards a virtual agent. Furthermore, it investigates the effect of previous experience with virtual agents and gender on empathy towards the virtual agent. In an experiment, participants witnessed a virtual reality scene in which a virtual agent experienced sadness. Half of the participants were previously presented with a personification story about the virtual agent, and all completed a self-report questionnaire about empathy and a post-experiment survey about their empathic response towards the virtual agent. Results showed that individual differences in empathy significantly predict the ability to empathize with the virtual agent: people who are naturally predisposed to feel more empathy towards others tend to be more empathic towards the virtual agent. The personification story, previous experience and participants&#x2019; gender did not affect the empathic response. Implications and future direction for the design of virtual agents are discussed.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "For interactions to be natural, virtual agents should understand humans’ emotions, and humans should have emotional reactions towards them. In human-to-human interaction, this is achieved through empathic processes between individuals. 
So, improving empathic responses towards virtual agents represents a crucial step in improving human-virtual agent interactions. This study aims to identify whether the presence of a personification story and individual differences in the ability to empathize predict the empathic response towards a virtual agent. Furthermore, it investigates the effect of previous experience with virtual agents and gender on empathy towards the virtual agent. In an experiment, participants witnessed a virtual reality scene in which a virtual agent experienced sadness. Half of the participants were previously presented with a personification story about the virtual agent, and all completed a self-report questionnaire about empathy and a post-experiment survey about their empathic response towards the virtual agent. Results showed that individual differences in empathy significantly predict the ability to empathize with the virtual agent: people who are naturally predisposed to feel more empathy towards others tend to be more empathic towards the virtual agent. The personification story, previous experience and participants’ gender did not affect the empathic response. 
Implications and future direction for the design of virtual agents are discussed.", "fno": "572500a073", "keywords": [ "Behavioural Sciences Computing", "Emotion Recognition", "Human Computer Interaction", "Multi Agent Systems", "Virtual Reality", "Empathic Response", "General Empathic Tendencies", "Human Emotion Understanding", "Human To Human Interaction", "Human Virtual Agent Interactions", "Individual Differences", "Personification Story", "Virtual Reality Scene", "Human Computer Interaction", "Atmospheric Measurements", "Virtual Environments", "Particle Measurements", "Artificial Intelligence", "Virtual Reality", "Social Agents", "Emotional Rapport", "Empathy And Resonance", "Emotion In Human Computer Interaction" ], "authors": [ { "affiliation": "Utrecht University,Information and Computing Sciences,the Netherlands", "fullName": "Kim Kroes", "givenName": "Kim", "surname": "Kroes", "__typename": "ArticleAuthorType" }, { "affiliation": "Utrecht University,Information and Computing Sciences,the Netherlands", "fullName": "Isabella Saccardi", "givenName": "Isabella", "surname": "Saccardi", "__typename": "ArticleAuthorType" }, { "affiliation": "Utrecht University,Information and Computing Sciences,the Netherlands", "fullName": "Judith Masthoff", "givenName": "Judith", "surname": "Masthoff", "__typename": "ArticleAuthorType" } ], "idPrefix": "aivr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-12-01T00:00:00", "pubType": "proceedings", "pages": "73-81", "year": "2022", "issn": null, "isbn": "978-1-6654-5725-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "572500a064", "articleId": "1KmFbVCEHxm", "__typename": "AdjacentArticleType" }, "next": { "fno": "572500a082", "articleId": "1KmFfzv6fWo", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": 
"proceedings/wiiatw/2006/2749/0/04053265", "title": "Empathic Multiple Tutoring Agents for Multiple Learner Interface", "doi": null, "abstractUrl": "/proceedings-article/wiiatw/2006/04053265/12OmNAio73n", "parentPublication": { "id": "proceedings/wiiatw/2006/2749/0", "title": "2006 IEEE/WIC/ACM International Conference on Web Intelligence International Intelligence Agent Technology Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2009/4800/0/05349570", "title": "“I can feel it too!”: Emergent empathic reactions between synthetic characters", "doi": null, "abstractUrl": "/proceedings-article/acii/2009/05349570/12OmNApcucz", "parentPublication": { "id": "proceedings/acii/2009/4800/0", "title": "2009 3rd International Conference on Affective Computing and Intelligent Interaction and Workshops (ACII 2009)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isuvr/2017/3091/0/3091a038", "title": "Empathic Mixed Reality: Sharing What You Feel and Interacting with What You See", "doi": null, "abstractUrl": "/proceedings-article/isuvr/2017/3091a038/12OmNBNM97G", "parentPublication": { "id": "proceedings/isuvr/2017/3091/0", "title": "2017 International Symposium on Ubiquitous Virtual Reality (ISUVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/Ismar-mashd/2014/6887/0/06935433", "title": "AR Petite Theater: Augmented reality storybook for supporting children's empathy behavior", "doi": null, "abstractUrl": "/proceedings-article/Ismar-mashd/2014/06935433/12OmNC1Y5qv", "parentPublication": { "id": "proceedings/Ismar-mashd/2014/6887/0", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality - Media, Art, Social Science, Humanities and Design (ISMAR-MASH'D)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aamas/2004/2092/1/20920194", 
"title": "Caring for Agents and Agents that Care: Building Empathic Relations with Synthetic Agents", "doi": null, "abstractUrl": "/proceedings-article/aamas/2004/20920194/12OmNyoSb90", "parentPublication": { "id": "proceedings/aamas/2004/2092/1", "title": "Autonomous Agents and Multiagent Systems, International Joint Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wi-iatw/2006/2749/0/27490339", "title": "Empathic Multiple Tutoring Agents for Multiple Learner Interface", "doi": null, "abstractUrl": "/proceedings-article/wi-iatw/2006/27490339/12OmNzICEL1", "parentPublication": { "id": "proceedings/wi-iatw/2006/2749/0", "title": "Web Intelligence and Intelligent Agent Technology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2010/01/tta2010010060", "title": "Empathic Touch by Relational Agents", "doi": null, "abstractUrl": "/journal/ta/2010/01/tta2010010060/13rRUwI5Ujg", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2021/03/08642402", "title": "First Impressions Count! 
The Role of the Human&#x0027;s Emotional State on Rapport Established with an Empathic versus Neutral Virtual Therapist", "doi": null, "abstractUrl": "/journal/ta/2021/03/08642402/17PYEmawc80", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2022/5908/0/09953869", "title": "Computational Empathy Counteracts the Negative Effects of Anger on Creative Problem Solving", "doi": null, "abstractUrl": "/proceedings-article/acii/2022/09953869/1IAK52oWzbq", "parentPublication": { "id": "proceedings/acii/2022/5908/0", "title": "2022 10th International Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2021/0191/0/019100c168", "title": "Emotional Features of Interactions with Empathic Agents", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2021/019100c168/1yNiET1pvl6", "parentPublication": { "id": "proceedings/iccvw/2021/0191/0", "title": "2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cI6akLvAuQ", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cJ1ec9BJL2", "doi": "10.1109/VR.2019.8798201", "title": "Collaborative Problem Solving in Local and Remote VR Situations", "normalizedTitle": "Collaborative Problem Solving in Local and Remote VR Situations", "abstract": "Virtual Reality supports collaboration among partners across departments and fields, independent of physical boundaries. Virtual reality applications can solve the time and cost consuming logistic problem that companies encounter when sending experts to remote locations. However, it is not yet clear how effective partners collaborate when in remote locations. In one experiment, we examined whether partners who are physically in the same room and interact with each other before they start collaborating affects performance compared to collaborators who meet and interact only within the virtual space. Participants had to solve a Rubik's cube type three-dimensional puzzle by arranging cubes that varied in color within a solution space in such a way, so that each side of the solution space showed a single color. Participants were immersed within a virtual environment and in one condition participants were collocated in the same room (local condition), while in the other one they were located in different rooms (remote condition). Results showed that collaborators in both conditions successfully completed the task but performance was better during the local compared to the remote condition.", "abstracts": [ { "abstractType": "Regular", "content": "Virtual Reality supports collaboration among partners across departments and fields, independent of physical boundaries. 
Virtual reality applications can solve the time and cost consuming logistic problem that companies encounter when sending experts to remote locations. However, it is not yet clear how effective partners collaborate when in remote locations. In one experiment, we examined whether partners who are physically in the same room and interact with each other before they start collaborating affects performance compared to collaborators who meet and interact only within the virtual space. Participants had to solve a Rubik's cube type three-dimensional puzzle by arranging cubes that varied in color within a solution space in such a way, so that each side of the solution space showed a single color. Participants were immersed within a virtual environment and in one condition participants were collocated in the same room (local condition), while in the other one they were located in different rooms (remote condition). Results showed that collaborators in both conditions successfully completed the task but performance was better during the local compared to the remote condition.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Virtual Reality supports collaboration among partners across departments and fields, independent of physical boundaries. Virtual reality applications can solve the time and cost consuming logistic problem that companies encounter when sending experts to remote locations. However, it is not yet clear how effective partners collaborate when in remote locations. In one experiment, we examined whether partners who are physically in the same room and interact with each other before they start collaborating affects performance compared to collaborators who meet and interact only within the virtual space. Participants had to solve a Rubik's cube type three-dimensional puzzle by arranging cubes that varied in color within a solution space in such a way, so that each side of the solution space showed a single color. 
Participants were immersed within a virtual environment and in one condition participants were collocated in the same room (local condition), while in the other one they were located in different rooms (remote condition). Results showed that collaborators in both conditions successfully completed the task but performance was better during the local compared to the remote condition.", "fno": "08798201", "keywords": [ "Groupware", "Problem Solving", "Virtual Reality", "Virtual Environment", "Local Condition", "Remote Condition", "Collaborative Problem", "Local VR Situations", "Remote VR Situations", "Physical Boundaries", "Virtual Reality Applications", "Logistic Problem", "Remote Locations", "Virtual Space", "Rubiks Cube Type", "Task Analysis", "Collaboration", "Color", "Virtual Environments", "Headphones", "Microphones", "Collaboration", "Problem Solving", "Puzzle Task", "Social Interaction" ], "authors": [ { "affiliation": "Max Planck Institute for Biological Cybernetics", "fullName": "Adamantini Hatzipanayioti", "givenName": "Adamantini", "surname": "Hatzipanayioti", "__typename": "ArticleAuthorType" }, { "affiliation": "Max Planck Institute for Biological Cybernetics", "fullName": "Anastasia Pavlidou", "givenName": "Anastasia", "surname": "Pavlidou", "__typename": "ArticleAuthorType" }, { "affiliation": "GSaME, University of Stuttgart", "fullName": "Manuel Dixken", "givenName": "Manuel", "surname": "Dixken", "__typename": "ArticleAuthorType" }, { "affiliation": "Max Planck Institute for Biological Cybernetics", "fullName": "Heinrich H. 
Bülthoff", "givenName": "Heinrich H.", "surname": "Bülthoff", "__typename": "ArticleAuthorType" }, { "affiliation": "Max Planck Institute for Biological Cybernetics", "fullName": "Tobias Meilinger", "givenName": "Tobias", "surname": "Meilinger", "__typename": "ArticleAuthorType" }, { "affiliation": "Fraunhofer Institute for Industrial Engineering IAO", "fullName": "Matthias Bues", "givenName": "Matthias", "surname": "Bues", "__typename": "ArticleAuthorType" }, { "affiliation": "Amazon Research", "fullName": "Betty J. Mohler", "givenName": "Betty J.", "surname": "Mohler", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-03-01T00:00:00", "pubType": "proceedings", "pages": "964-965", "year": "2019", "issn": null, "isbn": "978-1-7281-1377-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08798052", "articleId": "1cJ0Kew3Lzy", "__typename": "AdjacentArticleType" }, "next": { "fno": "08798321", "articleId": "1cJ0PlpMuXe", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2018/3365/0/08446157", "title": "VR Music", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446157/13bd1gQYgEq", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2007/2755/0/04076460", "title": "Feedback on Collaborative Skills in Remote Studio Design", "doi": null, "abstractUrl": "/proceedings-article/hicss/2007/04076460/17D45WB0qdH", "parentPublication": { "id": "proceedings/hicss/2007/2755/0", "title": "2007 40th Annual Hawaii International Conference on System Sciences (HICSS'07)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/3dcve/2018/5132/0/08637112", "title": "Distributed Architecture for Remote Collaborative Modification of Parametric CAD Data", "doi": null, "abstractUrl": "/proceedings-article/3dcve/2018/08637112/17D45WZZ7F8", "parentPublication": { "id": "proceedings/3dcve/2018/5132/0", "title": "2018 IEEE Fourth VR International Workshop on Collaborative Virtual Environments (3DCVE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sive/2018/5713/0/08577177", "title": "Influence of hearing your steps and environmental sounds in VR while walking", "doi": null, "abstractUrl": "/proceedings-article/sive/2018/08577177/17D45XoXP3w", "parentPublication": { "id": "proceedings/sive/2018/5713/0", "title": "2018 IEEE 4th VR Workshop on Sonic Interactions for Virtual Environments (SIVE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/05/09714054", "title": "Remote research on locomotion interfaces for virtual reality: Replication of a lab-based study on teleporting interfaces", "doi": null, "abstractUrl": "/journal/tg/2022/05/09714054/1B0XZAXWaIg", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a485", "title": "Subjective and Objective Analyses of Collaboration and Co-Presence in a Virtual Reality Remote Environment", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a485/1CJcLeKILw4", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a948", "title": "[DC] Designing Immersive Tools for 
Supporting Cognition in Remote Scientific Collaboration", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a948/1CJeZLMasuY", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a787", "title": "VRDoc: Gaze-based Interactions for VR Reading Experience", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a787/1JrRgFp6G2s", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797801", "title": "Spatial Presence in Real and Remote Immersive Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797801/1cJ10uVKWxW", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09206143", "title": "Spatial Presence, Performance, and Behavior between Real, Remote, and Virtual Immersive Environments", "doi": null, "abstractUrl": "/journal/tg/2020/12/09206143/1npxM6fDN7i", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1jIxhEnA8IE", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "acronym": "vrw", "groupId": "1836626", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1jIxoN49SQU", "doi": "10.1109/VRW50115.2020.00046", "title": "Latency Detection and Illusion in a Head-Worn Virtual Environment", "normalizedTitle": "Latency Detection and Illusion in a Head-Worn Virtual Environment", "abstract": "Through the history of virtual environments research there has been significant interest in understanding how latency in a system affects a user&#x2019;s experience. Though latency cannot be avoided, previous work has observed that there may be ranges within which small latencies are not discernible. However, the majority of the work examining latency detection thresholds was conducted using hardware and software that are no longer commonly used in contemporary research. In the current study, we examine whether similar latency tolerances exist for modern, off-the-shelf systems. We also look at the effect of increasing and decreasing latency on such tolerances. This revealed evidence of a &#x201C;latency illusion&#x201D; that presents in cases of decreasing latency resulting in subjects perceiving less latency than is actually present in the environment.", "abstracts": [ { "abstractType": "Regular", "content": "Through the history of virtual environments research there has been significant interest in understanding how latency in a system affects a user&#x2019;s experience. Though latency cannot be avoided, previous work has observed that there may be ranges within which small latencies are not discernible. However, the majority of the work examining latency detection thresholds was conducted using hardware and software that are no longer commonly used in contemporary research. 
In the current study, we examine whether similar latency tolerances exist for modern, off-the-shelf systems. We also look at the effect of increasing and decreasing latency on such tolerances. This revealed evidence of a &#x201C;latency illusion&#x201D; that presents in cases of decreasing latency resulting in subjects perceiving less latency than is actually present in the environment.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Through the history of virtual environments research there has been significant interest in understanding how latency in a system affects a user’s experience. Though latency cannot be avoided, previous work has observed that there may be ranges within which small latencies are not discernible. However, the majority of the work examining latency detection thresholds was conducted using hardware and software that are no longer commonly used in contemporary research. In the current study, we examine whether similar latency tolerances exist for modern, off-the-shelf systems. We also look at the effect of increasing and decreasing latency on such tolerances. 
This revealed evidence of a “latency illusion” that presents in cases of decreasing latency resulting in subjects perceiving less latency than is actually present in the environment.", "fno": "09090663", "keywords": [ "Human Factors", "Virtual Environments", "I 3 7 Computer Graphics Three Dimensional Graphics And Realism", "Virtual Reality", "I 4 8 Scene Analysis Depth Cues", "H 5 1 Information Systems Multimedia Information Systems", "Artificial", "Augmented", "Virtual Realities", "H 1 2 Information Systems User Machine Systems", "Human Factors" ], "authors": [ { "affiliation": "University of Mississippi,High Fidelity Virtual Environments Lab Computer & Information Science", "fullName": "Collin Roth", "givenName": "Collin", "surname": "Roth", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Mississippi,High Fidelity Virtual Environments Lab Computer & Information Science", "fullName": "Ethan Luckett", "givenName": "Ethan", "surname": "Luckett", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Mississippi,High Fidelity Virtual Environments Lab Computer & Information Science", "fullName": "J. Adam Jones", "givenName": "J. 
Adam", "surname": "Jones", "__typename": "ArticleAuthorType" } ], "idPrefix": "vrw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-03-01T00:00:00", "pubType": "proceedings", "pages": "215-218", "year": "2020", "issn": null, "isbn": "978-1-7281-6532-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09090584", "articleId": "1jIxp7KBAwo", "__typename": "AdjacentArticleType" }, "next": { "fno": "09090467", "articleId": "1jIxiLqEktG", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2003/1882/0/18820141", "title": "Effect of Latency on Presence in Stressful Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2003/18820141/12OmNAFnCwJ", "parentPublication": { "id": "proceedings/vr/2003/1882/0", "title": "Proceedings IEEE Virtual Reality 2003", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504733", "title": "A low-cost, low-latency approach to dynamic immersion in occlusive head-mounted displays", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504733/12OmNBEGYLT", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504722", "title": "Effect of head mounted display latency on human stability during quiescent standing on one foot", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504722/12OmNrJAdXj", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2003/1922/0/19220085", "title": "A Consistency Model for Evaluating 
Distributed Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/cw/2003/19220085/12OmNvjgWqm", "parentPublication": { "id": "proceedings/cw/2003/1922/0", "title": "Proceedings. 2003 International Conference on Cyberworlds", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/04/07384528", "title": "Visual Quality Adjustment for Volume Rendering in a Head-Tracked Virtual Environment", "doi": null, "abstractUrl": "/journal/tg/2016/04/07384528/13rRUxBrGh4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/10/08438908", "title": "Joint Action in a Virtual Environment: Crossing Roads with Risky vs. Safe Human and Agent Partners", "doi": null, "abstractUrl": "/journal/tg/2019/10/08438908/13rRUyfKIHV", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798361", "title": "Latency Measurement in Head-Mounted Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798361/1cJ1gZOI9Gg", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a124", "title": "Mental Fatigue of Long-Term Office Tasks in Virtual Environment", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a124/1gysnb0tidq", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089623", "title": "Asymmetric Effects of the Ebbinghaus Illusion on Depth Judgments", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089623/1jIx9JY7KHS", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2021/4065/0/406500a180", "title": "Entering a new Dimension in Virtual Reality Research: An Overview of Existing Toolkits, their Features and Challenges", "doi": null, "abstractUrl": "/proceedings-article/cw/2021/406500a180/1yBF0L6Dd8k", "parentPublication": { "id": "proceedings/cw/2021/4065/0", "title": "2021 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1qpzz6dhLLq", "title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "acronym": "aivr", "groupId": "1830004", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1qpzC44fheg", "doi": "10.1109/AIVR50618.2020.00023", "title": "Verbal Mimicry Predicts Social Distance and Social Attraction to an Outgroup Member in Virtual Reality", "normalizedTitle": "Verbal Mimicry Predicts Social Distance and Social Attraction to an Outgroup Member in Virtual Reality", "abstract": "The present study analyzes the extent to which verbal mimicry contributes to improving outgroup perceptions in virtual reality (VR) interactions. Particularly, this study examined the interplay between avatar customization, the salience of a common ingroup identity, and verbal mimicry in 54 VR dyads comprising users from different ethnic backgrounds. Participants were asked to customize their avatars to look either like themselves or someone completely different. Participants interacted wearing either similar avatar uniforms (salient common identity) or different clothes (nonsalient identity). The linguistic style matching (LSM) algorithm was employed to calculate verbal mimicry in the communication exchanged during a joint task. The results suggested that verbal mimicry significantly predicted lesser social distance and greater social attraction towards the outgroup member. These results are discussed in terms of their contribution for potential intergroup models of avatar communication in immersive virtual environments (IVEs).", "abstracts": [ { "abstractType": "Regular", "content": "The present study analyzes the extent to which verbal mimicry contributes to improving outgroup perceptions in virtual reality (VR) interactions. 
Particularly, this study examined the interplay between avatar customization, the salience of a common ingroup identity, and verbal mimicry in 54 VR dyads comprising users from different ethnic backgrounds. Participants were asked to customize their avatars to look either like themselves or someone completely different. Participants interacted wearing either similar avatar uniforms (salient common identity) or different clothes (nonsalient identity). The linguistic style matching (LSM) algorithm was employed to calculate verbal mimicry in the communication exchanged during a joint task. The results suggested that verbal mimicry significantly predicted lesser social distance and greater social attraction towards the outgroup member. These results are discussed in terms of their contribution for potential intergroup models of avatar communication in immersive virtual environments (IVEs).", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The present study analyzes the extent to which verbal mimicry contributes to improving outgroup perceptions in virtual reality (VR) interactions. Particularly, this study examined the interplay between avatar customization, the salience of a common ingroup identity, and verbal mimicry in 54 VR dyads comprising users from different ethnic backgrounds. Participants were asked to customize their avatars to look either like themselves or someone completely different. Participants interacted wearing either similar avatar uniforms (salient common identity) or different clothes (nonsalient identity). The linguistic style matching (LSM) algorithm was employed to calculate verbal mimicry in the communication exchanged during a joint task. The results suggested that verbal mimicry significantly predicted lesser social distance and greater social attraction towards the outgroup member. 
These results are discussed in terms of their contribution for potential intergroup models of avatar communication in immersive virtual environments (IVEs).", "fno": "746300a068", "keywords": [ "Avatars", "Human Computer Interaction", "Human Factors", "Social Sciences", "Verbal Mimicry", "Outgroup Member", "Virtual Reality Interactions", "Avatar Customization", "Common Ingroup Identity", "Similar Avatar Uniforms", "Salient Common Identity", "Clothes", "Social Attraction", "Social Distance Prediction", "Outgroup Perceptions", "VR Interactions", "Ethnic Backgrounds", "Linguistic Style Matching Algotihm", "LSM Algorithm", "Avatar Communication", "Immersive Virtual Environments", "Avatars", "Color", "Linguistics", "Social Factors", "Human Factors", "Diseases", "Virtual Environments", "Mimicry", "Linguistic Style Matching", "Avatar Customization", "Intergroup Contact", "Virtual Reality" ], "authors": [ { "affiliation": "Queen’s University Belfast,School of Psychology,Belfast,United Kingdom", "fullName": "Salvador Alvidrez", "givenName": "Salvador", "surname": "Alvidrez", "__typename": "ArticleAuthorType" }, { "affiliation": "University of California at Davis,Department of communication,Davis,United States", "fullName": "Jorge Peña", "givenName": "Jorge", "surname": "Peña", "__typename": "ArticleAuthorType" } ], "idPrefix": "aivr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-12-01T00:00:00", "pubType": "proceedings", "pages": "68-73", "year": "2020", "issn": null, "isbn": "978-1-7281-7463-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "746300a060", "articleId": "1qpzCwDcDKM", "__typename": "AdjacentArticleType" }, "next": { "fno": "746300a074", "articleId": "1qpzzcA7NHa", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2016/0836/0/07504761", 
"title": "Avatar realism and social interaction quality in virtual reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504761/12OmNzdoMvk", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446261", "title": "The Influence of Avatar Representation and Behavior on Communication in Social Immersive Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446261/13bd1gCd7T2", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2019/04/08039497", "title": "A Methodology for the Automatic Extraction and Generation of Non-Verbal Signals Sequences Conveying Interpersonal Attitudes", "doi": null, "abstractUrl": "/journal/ta/2019/04/08039497/13rRUB7a0Zy", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a770", "title": "Emotional Empathy and Facial Mimicry of Avatar Faces", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a770/1CJdHd5yTSM", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/imet/2022/7016/0/09929618", "title": "Evaluation of a Virtual Reality Learning Enviroment testbed and Non-Verbal Cue Integration", "doi": null, "abstractUrl": "/proceedings-article/imet/2022/09929618/1HYuSVN2OIM", "parentPublication": { "id": "proceedings/imet/2022/7016/0", "title": "2022 International 
Conference on Interactive Media, Smart Systems and Emerging Technologies (IMET)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a552", "title": "Persuasive Vibrations: Effects of Speech-Based Vibrations on Persuasion, Leadership, and Co-Presence During Verbal Communication in VR", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a552/1MNgYjAysYU", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797789", "title": "VR-Replay: Capturing and Replaying Avatars in VR for Asynchronous 3D Collaborative Design", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797789/1cJ0ZaKBbJS", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090457", "title": "Affective Embodiment: The effect of avatar appearance and posture representation on emotions in VR", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090457/1jIxjXwO4HS", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a715", "title": "[DC] Privacy in VR: Empowering Users with Emotional Privacy from Verbal and Non-verbal Behavior of Their Avatars", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a715/1tnXsX6EMBa", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and 
Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a788", "title": "Revisiting Distance Perception with Scaled Embodied Cues in Social Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2021/255600a788/1tuAHZj29Q4", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwJPMYe", "title": "CVPR 2011 WORKSHOPS", "acronym": "cvprw", "groupId": "1001809", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNx3Zjf0", "doi": "10.1109/CVPRW.2011.5981684", "title": "Temperature distribution descriptor for robust 3D shape retrieval", "normalizedTitle": "Temperature distribution descriptor for robust 3D shape retrieval", "abstract": "Recent developments in acquisition techniques are resulting in a very rapid growth of the number of available three dimensional (3D) models across areas as diverse as engineering, medicine and biology. It is therefore of great interest to develop the efficient shape retrieval engines that, given a query object, return similar 3D objects. The performance of a shape retrieval engine is ultimately determined by the quality and characteristics of the shape descriptor used for shape representation. In this paper, we develop a novel shape descriptor, called temperature distribution (TD) descriptor, which is capable of exploring the intrinsic geometric features on the shape. It intuitively interprets the shape in an isometrically-invariant, shape-aware, noise and small topological changes insensitive way. TD descriptor is driven by by heat kernel. The TD descriptor understands the shape by evaluating the surface temperature distribution evolution with time after applying unit heat at each vertex. The TD descriptor is represented in a concise form of a one dimensional (1D) histogram, and captures enough information to robustly handle the shape matching and retrieval process. 
Experimental results demonstrate the effectiveness of TD descriptor within applications of 3D shape matching and searching for the models at different poses and various noise levels.", "abstracts": [ { "abstractType": "Regular", "content": "Recent developments in acquisition techniques are resulting in a very rapid growth of the number of available three dimensional (3D) models across areas as diverse as engineering, medicine and biology. It is therefore of great interest to develop the efficient shape retrieval engines that, given a query object, return similar 3D objects. The performance of a shape retrieval engine is ultimately determined by the quality and characteristics of the shape descriptor used for shape representation. In this paper, we develop a novel shape descriptor, called temperature distribution (TD) descriptor, which is capable of exploring the intrinsic geometric features on the shape. It intuitively interprets the shape in an isometrically-invariant, shape-aware, noise and small topological changes insensitive way. TD descriptor is driven by by heat kernel. The TD descriptor understands the shape by evaluating the surface temperature distribution evolution with time after applying unit heat at each vertex. The TD descriptor is represented in a concise form of a one dimensional (1D) histogram, and captures enough information to robustly handle the shape matching and retrieval process. Experimental results demonstrate the effectiveness of TD descriptor within applications of 3D shape matching and searching for the models at different poses and various noise levels.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Recent developments in acquisition techniques are resulting in a very rapid growth of the number of available three dimensional (3D) models across areas as diverse as engineering, medicine and biology. 
It is therefore of great interest to develop the efficient shape retrieval engines that, given a query object, return similar 3D objects. The performance of a shape retrieval engine is ultimately determined by the quality and characteristics of the shape descriptor used for shape representation. In this paper, we develop a novel shape descriptor, called temperature distribution (TD) descriptor, which is capable of exploring the intrinsic geometric features on the shape. It intuitively interprets the shape in an isometrically-invariant, shape-aware, noise and small topological changes insensitive way. TD descriptor is driven by by heat kernel. The TD descriptor understands the shape by evaluating the surface temperature distribution evolution with time after applying unit heat at each vertex. The TD descriptor is represented in a concise form of a one dimensional (1D) histogram, and captures enough information to robustly handle the shape matching and retrieval process. Experimental results demonstrate the effectiveness of TD descriptor within applications of 3D shape matching and searching for the models at different poses and various noise levels.", "fno": "05981684", "keywords": [ "Image Representation", "Image Retrieval", "Shape Recognition", "Temperature Distribution Descriptor", "Robust 3 D Shape Retrieval", "Acquisition Techniques", "Three Dimensional Models", "Shape Descriptor", "Shape Representation", "Temperature Distribution", "TD", "Heat Kernel", "Retrieval Process", "Shape Matching", "Shape", "Heating", "Three Dimensional Displays", "Temperature Distribution", "Noise", "Solid Modeling", "Kernel" ], "authors": [ { "affiliation": "Purdue University, West Lafayette, IN, USA", "fullName": "Yi Fang", "givenName": "Yi", "surname": "Fang", "__typename": "ArticleAuthorType" }, { "affiliation": "Purdue University, West Lafayette, IN, USA", "fullName": "Mengtian Sun", "givenName": "Mengtian", "surname": "Sun", "__typename": "ArticleAuthorType" }, { "affiliation": 
"Purdue University, West Lafayette, IN, USA", "fullName": "Karthik Ramani", "givenName": "Karthik", "surname": "Ramani", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvprw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-06-01T00:00:00", "pubType": "proceedings", "pages": "9-16", "year": "2011", "issn": "2160-7508", "isbn": "978-1-4577-0529-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05981683", "articleId": "12OmNyKa67V", "__typename": "AdjacentArticleType" }, "next": { "fno": "05981685", "articleId": "12OmNx1Iw9l", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2016/8851/0/8851d309", "title": "Learned Binary Spectral Shape Descriptor for 3D Shape Correspondence", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2016/8851d309/12OmNBp52AI", "parentPublication": { "id": "proceedings/cvpr/2016/8851/0", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csie/2009/3507/6/3507f411", "title": "Shape Descriptor Based on Structural Curvature Histogram for Image Retrieval", "doi": null, "abstractUrl": "/proceedings-article/csie/2009/3507f411/12OmNvAAtsc", "parentPublication": { "id": null, "title": null, "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icris/2016/4155/0/4155a008", "title": "3D Shape Retrieval for Articulated Models", "doi": null, "abstractUrl": "/proceedings-article/icris/2016/4155a008/12OmNvFYQK9", "parentPublication": { "id": "proceedings/icris/2016/4155/0", "title": "2016 International Conference on Robots & Intelligent System (ICRIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/wacv/2016/0641/0/07477648", "title": "Heat propagation contours for 3D non-rigid shape analysis", "doi": null, "abstractUrl": "/proceedings-article/wacv/2016/07477648/12OmNvmowRp", "parentPublication": { "id": "proceedings/wacv/2016/0641/0", "title": "2016 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2004/2128/2/212820765", "title": "Multiscale Fourier Descriptor for Shape-Based Image Retrieval", "doi": null, "abstractUrl": "/proceedings-article/icpr/2004/212820765/12OmNyKrHk8", "parentPublication": { "id": "proceedings/icpr/2004/2128/2", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2012/4683/0/4683a153", "title": "Heat Kernels for Non-Rigid Shape Retrieval: Sparse Representation and Efficient Classification", "doi": null, "abstractUrl": "/proceedings-article/crv/2012/4683a153/12OmNzVoBxV", "parentPublication": { "id": "proceedings/crv/2012/4683/0", "title": "2012 Ninth Conference on Computer and Robot Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icis/2007/2841/0/284100795", "title": "A Composite Descriptor for Shape Retrieval", "doi": null, "abstractUrl": "/proceedings-article/icis/2007/284100795/12OmNzkMlLH", "parentPublication": { "id": "proceedings/icis/2007/2841/0", "title": "2007 International Conference on Computer and Information Science", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2017/07/07526450", "title": "DeepShape: Deep-Learned Shape Descriptor for 3D Shape Retrieval", "doi": null, "abstractUrl": "/journal/tp/2017/07/07526450/13rRUyYSWmg", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800j350", "title": "Unsupervised Deep Shape Descriptor With Point Distribution Learning", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800j350/1m3nJW3p8Fa", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2019/4752/0/09213070", "title": "Skeleton-Based 3D Model Descriptor and Its Application in Non-Rigid Shape Retrieval", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2019/09213070/1nHRToRge76", "parentPublication": { "id": "proceedings/icvrv/2019/4752/0", "title": "2019 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBDyAaZ", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "acronym": "iccv", "groupId": "1000149", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNyfdOPF", "doi": "10.1109/ICCV.2015.114", "title": "Multi-view Convolutional Neural Networks for 3D Shape Recognition", "normalizedTitle": "Multi-view Convolutional Neural Networks for 3D Shape Recognition", "abstract": "A longstanding question in computer vision concerns the representation of 3D shapes for recognition: should 3D shapes be represented with descriptors operating on their native 3D formats, such as voxel grid or polygon mesh, or can they be effectively represented with view-based descriptors? We address this question in the context of learning to recognize 3D shapes from a collection of their rendered views on 2D images. We first present a standard CNN architecture trained to recognize the shapes' rendered views independently of each other, and show that a 3D shape can be recognized even from a single view at an accuracy far higher than using state-of-the-art 3D shape descriptors. Recognition rates further increase when multiple views of the shapes are provided. In addition, we present a novel CNN architecture that combines information from multiple views of a 3D shape into a single and compact shape descriptor offering even better recognition performance. The same architecture can be applied to accurately recognize human hand-drawn sketches of shapes. 
We conclude that a collection of 2D views can be highly informative for 3D shape recognition and is amenable to emerging CNN architectures and their derivatives.", "abstracts": [ { "abstractType": "Regular", "content": "A longstanding question in computer vision concerns the representation of 3D shapes for recognition: should 3D shapes be represented with descriptors operating on their native 3D formats, such as voxel grid or polygon mesh, or can they be effectively represented with view-based descriptors? We address this question in the context of learning to recognize 3D shapes from a collection of their rendered views on 2D images. We first present a standard CNN architecture trained to recognize the shapes' rendered views independently of each other, and show that a 3D shape can be recognized even from a single view at an accuracy far higher than using state-of-the-art 3D shape descriptors. Recognition rates further increase when multiple views of the shapes are provided. In addition, we present a novel CNN architecture that combines information from multiple views of a 3D shape into a single and compact shape descriptor offering even better recognition performance. The same architecture can be applied to accurately recognize human hand-drawn sketches of shapes. We conclude that a collection of 2D views can be highly informative for 3D shape recognition and is amenable to emerging CNN architectures and their derivatives.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A longstanding question in computer vision concerns the representation of 3D shapes for recognition: should 3D shapes be represented with descriptors operating on their native 3D formats, such as voxel grid or polygon mesh, or can they be effectively represented with view-based descriptors? We address this question in the context of learning to recognize 3D shapes from a collection of their rendered views on 2D images. 
We first present a standard CNN architecture trained to recognize the shapes' rendered views independently of each other, and show that a 3D shape can be recognized even from a single view at an accuracy far higher than using state-of-the-art 3D shape descriptors. Recognition rates further increase when multiple views of the shapes are provided. In addition, we present a novel CNN architecture that combines information from multiple views of a 3D shape into a single and compact shape descriptor offering even better recognition performance. The same architecture can be applied to accurately recognize human hand-drawn sketches of shapes. We conclude that a collection of 2D views can be highly informative for 3D shape recognition and is amenable to emerging CNN architectures and their derivatives.", "fno": "8391a945", "keywords": [ "Three Dimensional Displays", "Shape", "Solid Modeling", "Cameras", "Image Recognition", "Computer Architecture", "Computer Vision" ], "authors": [ { "affiliation": null, "fullName": "Hang Su", "givenName": "Hang", "surname": "Su", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Subhransu Maji", "givenName": "Subhransu", "surname": "Maji", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Evangelos Kalogerakis", "givenName": "Evangelos", "surname": "Kalogerakis", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Erik Learned-Miller", "givenName": "Erik", "surname": "Learned-Miller", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-12-01T00:00:00", "pubType": "proceedings", "pages": "945-953", "year": "2015", "issn": "2380-7504", "isbn": "978-1-4673-8391-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "8391a936", "articleId": "12OmNvSKNTq", "__typename": "AdjacentArticleType" 
}, "next": { "fno": "8391a954", "articleId": "12OmNyz5JW7", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icme/2017/6067/0/08019464", "title": "Multi-view pairwise relationship learning for sketch based 3D shape retrieval", "doi": null, "abstractUrl": "/proceedings-article/icme/2017/08019464/12OmNy6Zs2q", "parentPublication": { "id": "proceedings/icme/2017/6067/0", "title": "2017 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/10/08439008", "title": "Learning Discriminative 3D Shape Representations by View Discerning Networks", "doi": null, "abstractUrl": "/journal/tg/2019/10/08439008/13rRUIJuxpE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000a264", "title": "GVCNN: Group-View Convolutional Neural Networks for 3D Shape Recognition", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000a264/17D45VObpPx", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200a001", "title": "MVTN: Multi-View Transformation Network for 3D Shape Recognition", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200a001/1BmEDFUFqw0", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200a397", "title": "Learning Canonical View Representation for 3D Shape Recognition with 
Arbitrary Views", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200a397/1BmKVyG6sO4", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2023/06/09947327", "title": "Learning View-Based Graph Convolutional Network for Multi-View 3D Shape Analysis", "doi": null, "abstractUrl": "/journal/tp/2023/06/09947327/1IiLnMjU1KE", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300d731", "title": "Enhancing 2D Representation via Adjacent Views for 3D Shape Retrieval", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300d731/1hVlyfuMEg0", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2020/8128/0/812800a949", "title": "NodeSLAM: Neural Object Descriptors for Multi-View Shape Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/3dv/2020/812800a949/1qyxnEJMyoo", "parentPublication": { "id": "proceedings/3dv/2020/8128/0", "title": "2020 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2020/0497/0/049700a005", "title": "Sketch-based 3D shape retrieval via attention", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2020/049700a005/1vg7Y9U3P5S", "parentPublication": { "id": "proceedings/icvrv/2020/0497/0", "title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/icicta/2020/8666/0/866600a223", "title": "Sketch-based 3D Shape Retrieval with Multi-Silhouette View Based on Convolutional Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/icicta/2020/866600a223/1wRIvGNgH9m", "parentPublication": { "id": "proceedings/icicta/2020/8666/0", "title": "2020 13th International Conference on Intelligent Computation Technology and Automation (ICICTA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "14qdcP8Ivdv", "title": "2018 International Conference on 3D Vision (3DV)", "acronym": "3dv", "groupId": "1800494", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "17D45W9KVJ2", "doi": "10.1109/3DV.2018.00038", "title": "Cross-Domain Image-Based 3D Shape Retrieval by View Sequence Learning", "normalizedTitle": "Cross-Domain Image-Based 3D Shape Retrieval by View Sequence Learning", "abstract": "We propose a cross-domain image-based 3D shape retrieval method, which learns a joint embedding space for natural images and 3D shapes in an end-to-end manner. The similarities between images and 3D shapes can be computed as the distances in this embedding space. To better encode a 3D shape, we propose a new feature aggregation method, Cross-View Convolution (CVC), which models a 3D shape as a sequence of rendered views. For bridging the gaps between images and 3D shapes, we propose a Cross-Domain Triplet Neural Network (CDTNN) that incorporates an adaptation layer to match the features from different domains better and can be trained end-to-end. In addition, we speed up the triplet training process by presenting a new fast cross-domain triplet neural network architecture. We evaluate our method on a new image to 3D shape dataset for category-level retrieval and ObjectNet3D for instance-level retrieval. Experimental results demonstrate that our method outperforms the state-of-the-art approaches in terms of retrieval performance. We also provide in-depth analysis of various design choices to further reduce the memory storage and computational cost.", "abstracts": [ { "abstractType": "Regular", "content": "We propose a cross-domain image-based 3D shape retrieval method, which learns a joint embedding space for natural images and 3D shapes in an end-to-end manner. The similarities between images and 3D shapes can be computed as the distances in this embedding space. 
To better encode a 3D shape, we propose a new feature aggregation method, Cross-View Convolution (CVC), which models a 3D shape as a sequence of rendered views. For bridging the gaps between images and 3D shapes, we propose a Cross-Domain Triplet Neural Network (CDTNN) that incorporates an adaptation layer to match the features from different domains better and can be trained end-to-end. In addition, we speed up the triplet training process by presenting a new fast cross-domain triplet neural network architecture. We evaluate our method on a new image to 3D shape dataset for category-level retrieval and ObjectNet3D for instance-level retrieval. Experimental results demonstrate that our method outperforms the state-of-the-art approaches in terms of retrieval performance. We also provide in-depth analysis of various design choices to further reduce the memory storage and computational cost.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose a cross-domain image-based 3D shape retrieval method, which learns a joint embedding space for natural images and 3D shapes in an end-to-end manner. The similarities between images and 3D shapes can be computed as the distances in this embedding space. To better encode a 3D shape, we propose a new feature aggregation method, Cross-View Convolution (CVC), which models a 3D shape as a sequence of rendered views. For bridging the gaps between images and 3D shapes, we propose a Cross-Domain Triplet Neural Network (CDTNN) that incorporates an adaptation layer to match the features from different domains better and can be trained end-to-end. In addition, we speed up the triplet training process by presenting a new fast cross-domain triplet neural network architecture. We evaluate our method on a new image to 3D shape dataset for category-level retrieval and ObjectNet3D for instance-level retrieval. 
Experimental results demonstrate that our method outperforms the state-of-the-art approaches in terms of retrieval performance. We also provide in-depth analysis of various design choices to further reduce the memory storage and computational cost.", "fno": "842500a258", "keywords": [ "Image Retrieval", "Learning Artificial Intelligence", "Neural Nets", "Rendering Computer Graphics", "Shape Recognition", "Natural Images", "Feature Aggregation Method", "Category Level Retrieval", "Object Net 3 D", "Instance Level Retrieval", "Cross Domain Image Based 3 D Shape Retrieval Method", "View Sequence Learning", "Cross View Convolution", "Triplet Training Process", "Cross Domain Triplet Neural Network Architecture", "Memory Storage", "Computational Cost", "Three Dimensional Displays", "Shape", "Convolution", "Solid Modeling", "Neural Networks", "Training", "Feature Extraction", "Image Based 3 D Shape Retrieval", "View Sequence Learning", "Triplet Loss" ], "authors": [ { "affiliation": null, "fullName": "Tang Lee", "givenName": "Tang", "surname": "Lee", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yen-Liang Lin", "givenName": "Yen-Liang", "surname": "Lin", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Hungyueh Chiang", "givenName": "Hungyueh", "surname": "Chiang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Ming-Wei Chiu", "givenName": "Ming-Wei", "surname": "Chiu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Winston Hsu", "givenName": "Winston", "surname": "Hsu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Polly Huang", "givenName": "Polly", "surname": "Huang", "__typename": "ArticleAuthorType" } ], "idPrefix": "3dv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-09-01T00:00:00", "pubType": "proceedings", "pages": "258-266", "year": "2018", "issn": null, "isbn": 
"978-1-5386-8425-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "842500a248", "articleId": "17D45WgziSk", "__typename": "AdjacentArticleType" }, "next": { "fno": "842500a267", "articleId": "17D45XtvpcU", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/wacv/2016/0641/0/07477652", "title": "3D shape retrieval using a single depth image from low-cost sensors", "doi": null, "abstractUrl": "/proceedings-article/wacv/2016/07477652/12OmNs0C9SV", "parentPublication": { "id": "proceedings/wacv/2016/0641/0", "title": "2016 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icris/2016/4155/0/4155a008", "title": "3D Shape Retrieval for Articulated Models", "doi": null, "abstractUrl": "/proceedings-article/icris/2016/4155a008/12OmNvFYQK9", "parentPublication": { "id": "proceedings/icris/2016/4155/0", "title": "2016 International Conference on Robots & Intelligent System (ICRIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2017/06/mcg2017060088", "title": "Sketch-Based Articulated 3D Shape Retrieval", "doi": null, "abstractUrl": "/magazine/cg/2017/06/mcg2017060088/13rRUwfqpG7", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2017/07/07526450", "title": "DeepShape: Deep-Learned Shape Descriptor for 3D Shape Retrieval", "doi": null, "abstractUrl": "/journal/tp/2017/07/07526450/13rRUyYSWmg", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/cvpr/2018/6420/0/642000b945", "title": "Triplet-Center Loss for Multi-view 3D Object Retrieval", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000b945/17D45WrVg1l", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200l1385", "title": "Single Image 3D Shape Retrieval via Cross-Modal Instance and Category Contrastive Learning", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200l1385/1BmG6MfML1m", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2023/06/09947327", "title": "Learning View-Based Graph Convolutional Network for Multi-View 3D Shape Analysis", "doi": null, "abstractUrl": "/journal/tp/2023/06/09947327/1IiLnMjU1KE", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2022/5670/0/567000a383", "title": "Structure-Aware 3D VR Sketch to 3D Shape Retrieval", "doi": null, "abstractUrl": "/proceedings-article/3dv/2022/567000a383/1KYsqgmUniE", "parentPublication": { "id": "proceedings/3dv/2022/5670/0", "title": "2022 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300d731", "title": "Enhancing 2D Representation via Adjacent Views for 3D Shape Retrieval", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300d731/1hVlyfuMEg0", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference 
on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2020/1331/0/09102925", "title": "Cross-Modal Guidance Network For Sketch-Based 3d Shape Retrieval", "doi": null, "abstractUrl": "/proceedings-article/icme/2020/09102925/1kwqTrDSXF6", "parentPublication": { "id": "proceedings/icme/2020/1331/0", "title": "2020 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKirt", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "17D45WIXbNW", "doi": "10.1109/CVPR.2018.00323", "title": "Pixels, Voxels, and Views: A Study of Shape Representations for Single View 3D Object Shape Prediction", "normalizedTitle": "Pixels, Voxels, and Views: A Study of Shape Representations for Single View 3D Object Shape Prediction", "abstract": "The goal of this paper is to compare surface-based and volumetric 3D object shape representations, as well as viewer-centered and object-centered reference frames for single-view 3D shape prediction. We propose a new algorithm for predicting depth maps from multiple viewpoints, with a single depth or RGB image as input. By modifying the network and the way models are evaluated, we can directly compare the merits of voxels vs. surfaces and viewer-centered vs. object-centered for familiar vs. unfamiliar objects, as predicted from RGB or depth images. Among our findings, we show that surface-based methods outperform voxel representations for objects from novel classes and produce higher resolution outputs. We also find that using viewer-centered coordinates is advantageous for novel objects, while object-centered representations are better for more familiar objects. Interestingly, the coordinate frame significantly affects the shape representation learned, with object-centered placing more importance on implicitly recognizing the object category and viewer-centered producing shape representations with less dependence on category recognition.", "abstracts": [ { "abstractType": "Regular", "content": "The goal of this paper is to compare surface-based and volumetric 3D object shape representations, as well as viewer-centered and object-centered reference frames for single-view 3D shape prediction. 
We propose a new algorithm for predicting depth maps from multiple viewpoints, with a single depth or RGB image as input. By modifying the network and the way models are evaluated, we can directly compare the merits of voxels vs. surfaces and viewer-centered vs. object-centered for familiar vs. unfamiliar objects, as predicted from RGB or depth images. Among our findings, we show that surface-based methods outperform voxel representations for objects from novel classes and produce higher resolution outputs. We also find that using viewer-centered coordinates is advantageous for novel objects, while object-centered representations are better for more familiar objects. Interestingly, the coordinate frame significantly affects the shape representation learned, with object-centered placing more importance on implicitly recognizing the object category and viewer-centered producing shape representations with less dependence on category recognition.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The goal of this paper is to compare surface-based and volumetric 3D object shape representations, as well as viewer-centered and object-centered reference frames for single-view 3D shape prediction. We propose a new algorithm for predicting depth maps from multiple viewpoints, with a single depth or RGB image as input. By modifying the network and the way models are evaluated, we can directly compare the merits of voxels vs. surfaces and viewer-centered vs. object-centered for familiar vs. unfamiliar objects, as predicted from RGB or depth images. Among our findings, we show that surface-based methods outperform voxel representations for objects from novel classes and produce higher resolution outputs. We also find that using viewer-centered coordinates is advantageous for novel objects, while object-centered representations are better for more familiar objects. 
Interestingly, the coordinate frame significantly affects the shape representation learned, with object-centered placing more importance on implicitly recognizing the object category and viewer-centered producing shape representations with less dependence on category recognition.", "fno": "642000d061", "keywords": [ "Image Colour Analysis", "Image Representation", "Image Resolution", "Learning Artificial Intelligence", "Object Recognition", "Stereo Image Processing", "Single View 3 D Object Shape Prediction", "Volumetric 3 D Object Shape Representations", "Object Centered Reference Frames", "Single View 3 D Shape Prediction", "Depth Maps", "RGB Image", "Depth Images", "Surface Based Methods Outperform Voxel Representations", "Viewer Centered Coordinates", "Object Centered Representations", "Category Recognition", "Shape Representation Learning", "Shape", "Three Dimensional Displays", "Solid Modeling", "Predictive Models", "Decoding", "Training", "Automobiles" ], "authors": [ { "affiliation": null, "fullName": "Daeyun Shin", "givenName": "Daeyun", "surname": "Shin", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Charless C. 
Fowlkes", "givenName": "Charless C.", "surname": "Fowlkes", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Derek Hoiem", "givenName": "Derek", "surname": "Hoiem", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-06-01T00:00:00", "pubType": "proceedings", "pages": "3061-3069", "year": "2018", "issn": null, "isbn": "978-1-5386-6420-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "642000d051", "articleId": "17D45WODaoS", "__typename": "AdjacentArticleType" }, "next": { "fno": "642000d070", "articleId": "17D45WK5Aog", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icpr/1992/2910/0/00201525", "title": "Pairwise representations of shape", "doi": null, "abstractUrl": "/proceedings-article/icpr/1992/00201525/12OmNvjyxw1", "parentPublication": { "id": "proceedings/icpr/1992/2910/0", "title": "1992 11th IAPR International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/1990/2057/0/00139528", "title": "Qualitative 3-D shape reconstruction using distributed aspect graph matching", "doi": null, "abstractUrl": "/proceedings-article/iccv/1990/00139528/12OmNzZWbHE", "parentPublication": { "id": "proceedings/iccv/1990/2057/0", "title": "Proceedings Third International Conference on Computer Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/1983/06/04767456", "title": "Viewer Independent Shape Recognition", "doi": null, "abstractUrl": "/journal/tp/1983/06/04767456/13rRUwhpBEM", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "trans/tp/2013/11/ttp2013112608", "title": "Detailed 3D Representations for Object Recognition and Modeling", "doi": null, "abstractUrl": "/journal/tp/2013/11/ttp2013112608/13rRUy3xY3N", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000a060", "title": "Learning Pose Specific Representations by Predicting Different Views", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000a060/17D45VTRoCi", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300c172", "title": "3D Scene Reconstruction With Multi-Layer Depth and Epipolar Transformers", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300c172/1hVlfLRJFS0", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300i937", "title": "Shapeglot: Learning Language for Shape Differentiation", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300i937/1hVluK62cKs", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800l1781", "title": "Recognizing Objects From Any View With Object and Viewer-Centered Representations", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800l1781/1m3navJhp4Y", "parentPublication": { "id": 
"proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900n3648", "title": "Motion Representations for Articulated Animation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900n3648/1yeJdY5M4EM", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900m2482", "title": "CoCoNets: Continuous Contrastive 3D Scene Representations", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900m2482/1yeKStk2Rxu", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1wRIq5ih6ms", "title": "2020 13th International Conference on Intelligent Computation Technology and Automation (ICICTA)", "acronym": "icicta", "groupId": "1002487", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1wRIvGNgH9m", "doi": "10.1109/ICICTA51737.2020.00054", "title": "Sketch-based 3D Shape Retrieval with Multi-Silhouette View Based on Convolutional Neural Networks", "normalizedTitle": "Sketch-based 3D Shape Retrieval with Multi-Silhouette View Based on Convolutional Neural Networks", "abstract": "Searching for relevant 3D models based on hand-drawn sketches is important for many applications, such as sketch-based 3D modeling and recognition. Sketch-based shape retrieval (SBSR) has become a hot research spot in the field of model retrieval, pattern recognition, and computer vision. 3D deep representation based on Convolutional Neural Network (CNN) enables significant performance improvement over state-of-the-arts in task of 3D shape retrieval. Motivated by this, we proposed a sketch-based 3D model retrieval algorithm by utilizing representative views and CNN feature matching. The representative views are obtained by viewpoint entropy. The main idea of the method is that the hand-drawn sketch can be achieved according to a viewpoint of the 3D model. Thus the sketch and the projection of model from same class are similar. Therefore, we filter a certain amount of view as representative view to reduce the computational complexity and improve the accuracy. We extract CNN descriptors as features for representative view of each object. 
Our experiments on Shape Retrieval Contest (SHREC) 2012 database and SHREC 2013 database demonstrate that our method is better than state-of-the-art approaches.", "abstracts": [ { "abstractType": "Regular", "content": "Searching for relevant 3D models based on hand-drawn sketches is important for many applications, such as sketch-based 3D modeling and recognition. Sketch-based shape retrieval (SBSR) has become a hot research spot in the field of model retrieval, pattern recognition, and computer vision. 3D deep representation based on Convolutional Neural Network (CNN) enables significant performance improvement over state-of-the-arts in task of 3D shape retrieval. Motivated by this, we proposed a sketch-based 3D model retrieval algorithm by utilizing representative views and CNN feature matching. The representative views are obtained by viewpoint entropy. The main idea of the method is that the hand-drawn sketch can be achieved according to a viewpoint of the 3D model. Thus the sketch and the projection of model from same class are similar. Therefore, we filter a certain amount of view as representative view to reduce the computational complexity and improve the accuracy. We extract CNN descriptors as features for representative view of each object. Our experiments on Shape Retrieval Contest (SHREC) 2012 database and SHREC 2013 database demonstrate that our method is better than state-of-the-art approaches.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Searching for relevant 3D models based on hand-drawn sketches is important for many applications, such as sketch-based 3D modeling and recognition. Sketch-based shape retrieval (SBSR) has become a hot research spot in the field of model retrieval, pattern recognition, and computer vision. 3D deep representation based on Convolutional Neural Network (CNN) enables significant performance improvement over state-of-the-arts in task of 3D shape retrieval. 
Motivated by this, we proposed a sketch-based 3D model retrieval algorithm by utilizing representative views and CNN feature matching. The representative views are obtained by viewpoint entropy. The main idea of the method is that the hand-drawn sketch can be achieved according to a viewpoint of the 3D model. Thus the sketch and the projection of model from same class are similar. Therefore, we filter a certain amount of view as representative view to reduce the computational complexity and improve the accuracy. We extract CNN descriptors as features for representative view of each object. Our experiments on Shape Retrieval Contest (SHREC) 2012 database and SHREC 2013 database demonstrate that our method is better than state-of-the-art approaches.", "fno": "866600a223", "keywords": [ "Computer Vision", "Convolutional Neural Nets", "Feature Extraction", "Image Matching", "Image Representation", "Image Retrieval", "Shape Recognition", "Solid Modelling", "Stereo Image Processing", "Computer Vision", "Pattern Recognition", "SHREC 2013 Database", "Shape Retrieval Contest 2012 Database", "Computational Complexity", "Viewpoint Entropy", "SBSR", "3 D Deep Representation", "Convolutional Neural Networks", "Multisilhouette View", "Sketch Based 3 D Shape Retrieval", "Hand Drawn Sketch", "CNN Descriptors", "Representative View", "Sketch Based 3 D Model Retrieval Algorithm", "Solid Modeling", "Three Dimensional Displays", "Shape", "Databases", "Computational Modeling", "Semantics", "Feature Extraction", "Sketch", "3 D Shape Retrieval", "Convolutional Neural Network", "Representative View", "Entropy" ], "authors": [ { "affiliation": "The 41th Research Institute of China Electronic Technology Corporation (CETC),Qingdao,China,266000", "fullName": "Song Yang", "givenName": "Song", "surname": "Yang", "__typename": "ArticleAuthorType" } ], "idPrefix": "icicta", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-10-01T00:00:00", 
"pubType": "proceedings", "pages": "223-226", "year": "2020", "issn": null, "isbn": "978-1-7281-8666-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "866600a219", "articleId": "1wRIA3ZhT0Y", "__typename": "AdjacentArticleType" }, "next": { "fno": "866600a227", "articleId": "1wRIxS4yaxG", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icpr/2014/5209/0/5209e570", "title": "Sketch-Based 3D Model Retrieval via Multi-feature Fusion", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209e570/12OmNBOCWrV", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2016/4571/0/4571a173", "title": "Similarity Retrieval of 3D Models with Query by Clay Sketch", "doi": null, "abstractUrl": "/proceedings-article/ism/2016/4571a173/12OmNvk7JML", "parentPublication": { "id": "proceedings/ism/2016/4571/0", "title": "2016 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2017/6067/0/08019464", "title": "Multi-view pairwise relationship learning for sketch based 3D shape retrieval", "doi": null, "abstractUrl": "/proceedings-article/icme/2017/08019464/12OmNy6Zs2q", "parentPublication": { "id": "proceedings/icme/2017/6067/0", "title": "2017 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2017/06/mcg2017060088", "title": "Sketch-Based Articulated 3D Shape Retrieval", "doi": null, "abstractUrl": "/magazine/cg/2017/06/mcg2017060088/13rRUwfqpG7", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and 
Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2017/07/07526450", "title": "DeepShape: Deep-Learned Shape Descriptor for 3D Shape Retrieval", "doi": null, "abstractUrl": "/journal/tp/2017/07/07526450/13rRUyYSWmg", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdh/2018/9497/0/949700a311", "title": "Sketch-Based Shape Retrieval via Multi-view Attention and Generalized Similarity", "doi": null, "abstractUrl": "/proceedings-article/icdh/2018/949700a311/17D45VObpQZ", "parentPublication": { "id": "proceedings/icdh/2018/9497/0", "title": "2018 7th International Conference on Digital Home (ICDH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2016/4847/0/07900083", "title": "3D sketch-based 3D model retrieval with convolutional neural network", "doi": null, "abstractUrl": "/proceedings-article/icpr/2016/07900083/1gysq8EnfHi", "parentPublication": { "id": "proceedings/icpr/2016/4847/0", "title": "2016 23rd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/08/09007505", "title": "Sketch Augmentation-Driven Shape Retrieval Learning Framework Based on Convolutional Neural Networks", "doi": null, "abstractUrl": "/journal/tg/2021/08/09007505/1hJKlMJzueI", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2020/8128/0/812800a081", "title": "Towards 3D VR-Sketch to 3D Shape Retrieval", "doi": null, "abstractUrl": "/proceedings-article/3dv/2020/812800a081/1qyxlDtR0Ji", "parentPublication": { "id": "proceedings/3dv/2020/8128/0", 
"title": "2020 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdh/2020/9234/0/923400a184", "title": "Deep 3D Shape Reconstruction from Single-View Sketch Image", "doi": null, "abstractUrl": "/proceedings-article/icdh/2020/923400a184/1uGY2GTiIda", "parentPublication": { "id": "proceedings/icdh/2020/9234/0", "title": "2020 8th International Conference on Digital Home (ICDH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNy2agRS", "title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)", "acronym": "cad-graphics", "groupId": "1001488", "volume": "0", "displayVolume": "0", "year": "2013", "__typename": "ProceedingType" }, "article": { "id": "12OmNCmpcGZ", "doi": "10.1109/CADGraphics.2013.78", "title": "InSide: Interactive Sketching for Image Database Exploration", "normalizedTitle": "InSide: Interactive Sketching for Image Database Exploration", "abstract": "We propose an interactive sketching tool for exploring image database, called InSide. Our main contribution is a new solution of interactive image exploration that dynamically adapts to users' sketching and provides mixed feedback. A position-aware matching approach is proposed for InSide in order to support translation-free sketch searching. Based on demonstrated results, our method outperforms state-of-the-art approaches in aspects of user interface and matching results.", "abstracts": [ { "abstractType": "Regular", "content": "We propose an interactive sketching tool for exploring image database, called InSide. Our main contribution is a new solution of interactive image exploration that dynamically adapts to users' sketching and provides mixed feedback. A position-aware matching approach is proposed for InSide in order to support translation-free sketch searching. Based on demonstrated results, our method outperforms state-of-the-art approaches in aspects of user interface and matching results.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose an interactive sketching tool for exploring image database, called InSide. Our main contribution is a new solution of interactive image exploration that dynamically adapts to users' sketching and provides mixed feedback. A position-aware matching approach is proposed for InSide in order to support translation-free sketch searching. 
Based on demonstrated results, our method outperforms state-of-the-art approaches in aspects of user interface and matching results.", "fno": "06815042", "keywords": [ "Image Edge Detection", "User Interfaces", "Image Databases", "Indexes", "Real Time Systems", "Image Segmentation", "Interactive Image Search", "Sketching" ], "authors": [ { "affiliation": null, "fullName": "Hongxin Zhang", "givenName": "Hongxin", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Dongyu Liu", "givenName": "Dongyu", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Changhan Wang", "givenName": "Changhan", "surname": "Wang", "__typename": "ArticleAuthorType" } ], "idPrefix": "cad-graphics", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2013-11-01T00:00:00", "pubType": "proceedings", "pages": "423-424", "year": "2013", "issn": null, "isbn": "978-1-4799-2576-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06815041", "articleId": "12OmNwdtwl3", "__typename": "AdjacentArticleType" }, "next": { "fno": "06815043", "articleId": "12OmNyS6RJh", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icig/2013/5050/0/5050a927", "title": "Expressive Image Sketching with Two-Layer Image Features", "doi": null, "abstractUrl": "/proceedings-article/icig/2013/5050a927/12OmNBTs7y9", "parentPublication": { "id": "proceedings/icig/2013/5050/0", "title": "2013 Seventh International Conference on Image and Graphics (ICIG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csse/2008/3336/2/3336d049", "title": "An Interactive Sketching Modeling System: Sketch3D", "doi": null, "abstractUrl": "/proceedings-article/csse/2008/3336d049/12OmNroij4Y", "parentPublication": { 
"id": "proceedings/csse/2008/3336/6", "title": "Computer Science and Software Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/c5/2007/2806/0/04144947", "title": "DR. GEO II: Adding Interactivity Planes in Interactive Dynamic Geometry", "doi": null, "abstractUrl": "/proceedings-article/c5/2007/04144947/12OmNwCJOVs", "parentPublication": { "id": "proceedings/c5/2007/2806/0", "title": "Fifth International Conference on Creating, Connecting and Collaborating through Computing (C5 '07)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cts/2016/2300/0/07871004", "title": "Sketching Gesture-Based Applications in a Collaborative Working Environment with Wall-Sized Displays", "doi": null, "abstractUrl": "/proceedings-article/cts/2016/07871004/12OmNxbEtOu", "parentPublication": { "id": "proceedings/cts/2016/2300/0", "title": "2016 International Conference on Collaboration Technologies and Systems (CTS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2010/6237/0/05444806", "title": "In-Place Sketching for content authoring in Augmented Reality games", "doi": null, "abstractUrl": "/proceedings-article/vr/2010/05444806/12OmNxveNJV", "parentPublication": { "id": "proceedings/vr/2010/6237/0", "title": "2010 IEEE Virtual Reality Conference (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2010/6846/0/05444729", "title": "Keynote address: Interactive \"smart\" computers", "doi": null, "abstractUrl": "/proceedings-article/3dui/2010/05444729/12OmNyqRn58", "parentPublication": { "id": "proceedings/3dui/2010/6846/0", "title": "2010 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/10/08421591", "title": "Model-Guided 3D 
Sketching", "doi": null, "abstractUrl": "/journal/tg/2019/10/08421591/13rRUEgs2Mb", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2006/06/v1386", "title": "Generating Graphs for Visual Analytics through Interactive Sketching", "doi": null, "abstractUrl": "/journal/tg/2006/06/v1386/13rRUwcS1CP", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2012/03/mcg2012030059", "title": "Sketch-n-Stretch: Sketching Animations Using Cutouts", "doi": null, "abstractUrl": "/magazine/cg/2012/03/mcg2012030059/13rRUyeTVks", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2022/5670/0/567000a700", "title": "Interactive Sketching of Mannequin Poses", "doi": null, "abstractUrl": "/proceedings-article/3dv/2022/567000a700/1KYsucwMe5i", "parentPublication": { "id": "proceedings/3dv/2022/5670/0", "title": "2022 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNqH9hnv", "title": "2010 9th IEEE International Conference on Cognitive Informatics (ICCI)", "acronym": "coginf", "groupId": "1000097", "volume": "0", "displayVolume": "0", "year": "2010", "__typename": "ProceedingType" }, "article": { "id": "12OmNvDZEYX", "doi": "10.1109/COGINF.2010.5599690", "title": "Cognitive garment design interface using user behavior tree model", "normalizedTitle": "Cognitive garment design interface using user behavior tree model", "abstract": "An effective user interface helps to hinge on ideas and imagination from fashion designers and most importantly express their artworks with their flair. Shape, material, color, movement and flow - all these qualities give a piece of clothing its uniqueness, and the designer uses drawings to communicate his intentions. Sketches of various views of the garment provide the preliminary clues needed for bulk manufacturing. However, it is very difficult to develop a common user interface platform even as intuitive as sketching interface, since different designers have different senses and habits to work on their drawings. In this paper, we focus on this sketching issues and propose a user behavior tree (UBT) model that helps to return the corresponding shapes according to the preference of user. Also, in the front-tier, we provide a 3D user interface for editing the clothing panels, adjusting the sewing lines and simulating the garment design. Experiment results show the effectiveness and efficiency of the proposed model.", "abstracts": [ { "abstractType": "Regular", "content": "An effective user interface helps to hinge on ideas and imagination from fashion designers and most importantly express their artworks with their flair. Shape, material, color, movement and flow - all these qualities give a piece of clothing its uniqueness, and the designer uses drawings to communicate his intentions. 
Sketches of various views of the garment provide the preliminary clues needed for bulk manufacturing. However, it is very difficult to develop a common user interface platform even as intuitive as sketching interface, since different designers have different senses and habits to work on their drawings. In this paper, we focus on this sketching issues and propose a user behavior tree (UBT) model that helps to return the corresponding shapes according to the preference of user. Also, in the front-tier, we provide a 3D user interface for editing the clothing panels, adjusting the sewing lines and simulating the garment design. Experiment results show the effectiveness and efficiency of the proposed model.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "An effective user interface helps to hinge on ideas and imagination from fashion designers and most importantly express their artworks with their flair. Shape, material, color, movement and flow - all these qualities give a piece of clothing its uniqueness, and the designer uses drawings to communicate his intentions. Sketches of various views of the garment provide the preliminary clues needed for bulk manufacturing. However, it is very difficult to develop a common user interface platform even as intuitive as sketching interface, since different designers have different senses and habits to work on their drawings. In this paper, we focus on this sketching issues and propose a user behavior tree (UBT) model that helps to return the corresponding shapes according to the preference of user. Also, in the front-tier, we provide a 3D user interface for editing the clothing panels, adjusting the sewing lines and simulating the garment design. 
Experiment results show the effectiveness and efficiency of the proposed model.", "fno": "05599690", "keywords": [ "CAD", "Computer Graphics", "Production Engineering Computing", "Textile Industry", "Trees Mathematics", "User Interfaces", "Cognitive Garment Design Interface", "User Behavior Tree Model", "Bulk Manufacturing", "Sketching Interface", "CAD Systems", "Shape", "Clothing", "Solid Modeling", "Three Dimensional Displays", "Algorithm Design And Analysis", "User Interfaces", "Heuristic Algorithms" ], "authors": [ { "affiliation": "Department of Computing, The Hong Kong Polytechnic University, Hung Hom, Kowloon, Hong Kong", "fullName": "Shuang Liang", "givenName": "Shuang", "surname": "Liang", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Computing, The Hong Kong Polytechnic University, Hung Hom, Kowloon, Hong Kong", "fullName": "Eddie C. L. Chan", "givenName": "Eddie C. L.", "surname": "Chan", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Computing, The Hong Kong Polytechnic University, Hung Hom, Kowloon, Hong Kong", "fullName": "George Baciu", "givenName": "George", "surname": "Baciu", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Computing, The Hong Kong Polytechnic University, Hung Hom, Kowloon, Hong Kong", "fullName": "Rong-Hua Li", "givenName": "Rong-Hua", "surname": "Li", "__typename": "ArticleAuthorType" } ], "idPrefix": "coginf", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2010-07-01T00:00:00", "pubType": "proceedings", "pages": "496-500", "year": "2010", "issn": null, "isbn": "978-1-4244-8042-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05599689", "articleId": "12OmNz2C1lF", "__typename": "AdjacentArticleType" }, "next": { "fno": "05599687", "articleId": "12OmNAlvHVp", "__typename": "AdjacentArticleType" }, "__typename": 
"AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iswc/2012/4697/0/4697a064", "title": "Garment Positioning and Drift in Garment-Integrated Wearable Sensing", "doi": null, "abstractUrl": "/proceedings-article/iswc/2012/4697a064/12OmNBQC8d6", "parentPublication": { "id": "proceedings/iswc/2012/4697/0", "title": "2012 16th International Symposium on Wearable Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icci-cc/2011/1695/0/06016163", "title": "A graph modeling and matching method for sketch-based garment panel design", "doi": null, "abstractUrl": "/proceedings-article/icci-cc/2011/06016163/12OmNx5GU0A", "parentPublication": { "id": "proceedings/icci-cc/2011/1695/0", "title": "2011 10th IEEE International Conference on Cognitive Informatics & Cognitive Computing (ICCI-CC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2017/0560/0/08026317", "title": "Multimedia ontology based complementary garment recommendation", "doi": null, "abstractUrl": "/proceedings-article/icmew/2017/08026317/12OmNxveNPZ", "parentPublication": { "id": "proceedings/icmew/2017/0560/0", "title": "2017 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2015/9403/0/9403a267", "title": "Garment Design System Based on Body Model", "doi": null, "abstractUrl": "/proceedings-article/cw/2015/9403a267/12OmNzYwc0c", "parentPublication": { "id": "proceedings/cw/2015/9403/0", "title": "2015 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icitbe/2021/0099/0/009900a197", "title": "Strategic Analysis of Green Marketing in Chinese Textile and Garment Industry under the Background of Big Data", "doi": null, "abstractUrl": 
"/proceedings-article/icitbe/2021/009900a197/1AH7VcB1ihi", "parentPublication": { "id": "proceedings/icitbe/2021/0099/0", "title": "2021 International Conference on Information Technology and Biomedical Engineering (ICITBE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icbase/2021/2709/0/270900a162", "title": "Preliminary Discussion on Production Scheduling Optimization of Garment Intelligent Manufacturing System based on Big Data", "doi": null, "abstractUrl": "/proceedings-article/icbase/2021/270900a162/1AH8hgDDtBu", "parentPublication": { "id": "proceedings/icbase/2021/2709/0", "title": "2021 2nd International Conference on Big Data & Artificial Intelligence & Software Engineering (ICBASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2022/5670/0/567000a022", "title": "Garment Ideation: Iterative View-Aware Sketch-Based Garment Modeling", "doi": null, "abstractUrl": "/proceedings-article/3dv/2022/567000a022/1KYsti3axvq", "parentPublication": { "id": "proceedings/3dv/2022/5670/0", "title": "2022 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3cbit/2022/9225/0/922500a105", "title": "The Application of Curve Model in Garment Pattern Design", "doi": null, "abstractUrl": "/proceedings-article/3cbit/2022/922500a105/1La4KyFK9z2", "parentPublication": { "id": "proceedings/3cbit/2022/9225/0", "title": "2022 International Conference on Cloud Computing, Big Data and Internet of Things (3CBIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2020/1331/0/09102718", "title": "Fine-Grained Garment Parsing: A Body Generation Approach", "doi": null, "abstractUrl": "/proceedings-article/icme/2020/09102718/1kwrj147bYA", "parentPublication": { "id": "proceedings/icme/2020/1331/0", "title": "2020 IEEE International 
Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/mu/2022/01/09667322", "title": "Garment Style Creator: Using StarGAN for Image-to-Image Translation of Multidomain Garments", "doi": null, "abstractUrl": "/magazine/mu/2022/01/09667322/1zMCigzYI6c", "parentPublication": { "id": "mags/mu", "title": "IEEE MultiMedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwB2dUd", "title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)", "acronym": "3dui", "groupId": "1001623", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNx7G663", "doi": "10.1109/3DUI.2016.7460073", "title": "3D sketching on interactively unfolded vascular structures for treatment planning", "normalizedTitle": "3D sketching on interactively unfolded vascular structures for treatment planning", "abstract": "In clinical practice, sketches support physicians in treatment planning. For example, they are employed as direct annotations in medical image data. However, this approach leads to occlusions in case of spatially complex 3D representations of anatomical structures such as vascular systems. To overcome this limitation, we developed a framework which enables the physician to create annotations by freely sketching in 3D environment. We solve the problem of occlusions by an animated representation of the original and unfolded vascular structure with interactive unfolding. For this, we use a semi-immersive stereoscopic display and a stylus with ray-based interaction techniques.", "abstracts": [ { "abstractType": "Regular", "content": "In clinical practice, sketches support physicians in treatment planning. For example, they are employed as direct annotations in medical image data. However, this approach leads to occlusions in case of spatially complex 3D representations of anatomical structures such as vascular systems. To overcome this limitation, we developed a framework which enables the physician to create annotations by freely sketching in 3D environment. We solve the problem of occlusions by an animated representation of the original and unfolded vascular structure with interactive unfolding. 
For this, we use a semi-immersive stereoscopic display and a stylus with ray-based interaction techniques.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In clinical practice, sketches support physicians in treatment planning. For example, they are employed as direct annotations in medical image data. However, this approach leads to occlusions in case of spatially complex 3D representations of anatomical structures such as vascular systems. To overcome this limitation, we developed a framework which enables the physician to create annotations by freely sketching in 3D environment. We solve the problem of occlusions by an animated representation of the original and unfolded vascular structure with interactive unfolding. For this, we use a semi-immersive stereoscopic display and a stylus with ray-based interaction techniques.", "fno": "07460073", "keywords": [ "Three Dimensional Displays", "Medical Services", "Biomedical Imaging", "Planning", "Animation", "Solid Modeling", "J 3 LIFE AND MEDICAL SCIENCES Medical Information Systems", "I 3 6 COMPUTER GRAPHICS Methodology And Techniques Interaction Techniques", "I 3 7 COMPUTER GRAPHICS Three Dimensional Graphics And Realism Animation" ], "authors": [ { "affiliation": "Dept. of Simulation and Graphics, University of Magdeburg, Germany", "fullName": "Patrick Saalfeld", "givenName": "Patrick", "surname": "Saalfeld", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Simulation and Graphics, University of Magdeburg, Germany", "fullName": "Sylvia GlaGer", "givenName": "Sylvia", "surname": "GlaGer", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Neuroradiology, University Hospital Magdeburg, Germany", "fullName": "Oliver Beuing", "givenName": "Oliver", "surname": "Beuing", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. 
of Healthcare, Telematics and Medical Engineering, University of Magdeburg, Germany", "fullName": "Mandy Grundmann", "givenName": "Mandy", "surname": "Grundmann", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Simulation and Graphics, University of Magdeburg, Germany", "fullName": "Bernhard Preim", "givenName": "Bernhard", "surname": "Preim", "__typename": "ArticleAuthorType" } ], "idPrefix": "3dui", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2016-03-01T00:00:00", "pubType": "proceedings", "pages": "267-268", "year": "2016", "issn": null, "isbn": "978-1-5090-0842-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07460072", "articleId": "12OmNBqv2nN", "__typename": "AdjacentArticleType" }, "next": { "fno": "07460074", "articleId": "12OmNyz5JYd", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ieee-vis/2001/7200/0/7200hahn", "title": "Visualization and Interaction Techniques for the Exploration of Vascular Structures", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2001/7200hahn/12OmNAlvHR5", "parentPublication": { "id": "proceedings/ieee-vis/2001/7200/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isdea/2015/9393/0/9393a013", "title": "A FEM Model for Interactive Simulation of Guide Wire Navigation in Moving Vascular Structures", "doi": null, "abstractUrl": "/proceedings-article/isdea/2015/9393a013/12OmNB7cjkC", "parentPublication": { "id": "proceedings/isdea/2015/9393/0", "title": "2015 Sixth International Conference on Intelligent Systems Design and Engineering Applications (ISDEA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2003/2030/0/20300007", "title": 
"Advanced Curved Planar Reformation: Flattening of Vascular Structures", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2003/20300007/12OmNwErpEV", "parentPublication": { "id": "proceedings/ieee-vis/2003/2030/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2010/7029/0/05543593", "title": "Vascular tree reconstruction by minimizing a physiological functional cost", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2010/05543593/12OmNwswg62", "parentPublication": { "id": "proceedings/cvprw/2010/7029/0", "title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vg/2005/26/0/01500523", "title": "Scaffolding-based segmentation of coronary vascular structures", "doi": null, "abstractUrl": "/proceedings-article/vg/2005/01500523/12OmNyLA5zV", "parentPublication": { "id": "proceedings/vg/2005/26/0", "title": "Volume Graphics 2005", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2007/1630/0/04409172", "title": "Detection of Complex Vascular Structures using Polar Neighborhood Intensity Profile", "doi": null, "abstractUrl": "/proceedings-article/iccv/2007/04409172/12OmNyQYtaZ", "parentPublication": { "id": "proceedings/iccv/2007/1630/0", "title": "2007 11th IEEE International Conference on Computer Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/12/ttg2011121997", "title": "Context Preserving Maps of Tubular Structures", "doi": null, "abstractUrl": "/journal/tg/2011/12/ttg2011121997/13rRUwfI0Q8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tg/2008/06/ttg2008061603", "title": "Effective visualization of complex vascular structures using a non-parametric vessel detection method", "doi": null, "abstractUrl": "/journal/tg/2008/06/ttg2008061603/13rRUygT7f6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2006/05/v0877", "title": "Real-Time Illustration of Vascular Structures", "doi": null, "abstractUrl": "/journal/tg/2006/05/v0877/13rRUytF41r", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a064", "title": "VRContour: Bringing Contour Delineations of Medical Structures Into Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a064/1JrRc4SdYgU", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBC8AAD", "title": "2010 IEEE Virtual Reality Conference (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2010", "__typename": "ProceedingType" }, "article": { "id": "12OmNxveNJV", "doi": "10.1109/VR.2010.5444806", "title": "In-Place Sketching for content authoring in Augmented Reality games", "normalizedTitle": "In-Place Sketching for content authoring in Augmented Reality games", "abstract": "Sketching leverages human skills for various purposes. In-Place Augmented Reality Sketching experiences build on the intuitiveness and flexibility of hand sketching for tasks like content creation. In this paper we explore the design space of In-Place Augmented Reality Sketching, with particular attention to content authoring in games. We propose a contextual model that offers a framework for the exploration of this design space by the research community. We describe a sketch-based AR racing game we developed to demonstrate the proposed model. The game is developed on top of our shape recognition and 3D registration library for mobile AR.", "abstracts": [ { "abstractType": "Regular", "content": "Sketching leverages human skills for various purposes. In-Place Augmented Reality Sketching experiences build on the intuitiveness and flexibility of hand sketching for tasks like content creation. In this paper we explore the design space of In-Place Augmented Reality Sketching, with particular attention to content authoring in games. We propose a contextual model that offers a framework for the exploration of this design space by the research community. We describe a sketch-based AR racing game we developed to demonstrate the proposed model. The game is developed on top of our shape recognition and 3D registration library for mobile AR.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Sketching leverages human skills for various purposes. 
In-Place Augmented Reality Sketching experiences build on the intuitiveness and flexibility of hand sketching for tasks like content creation. In this paper we explore the design space of In-Place Augmented Reality Sketching, with particular attention to content authoring in games. We propose a contextual model that offers a framework for the exploration of this design space by the research community. We describe a sketch-based AR racing game we developed to demonstrate the proposed model. The game is developed on top of our shape recognition and 3D registration library for mobile AR.", "fno": "05444806", "keywords": [ "Augmented Reality", "User Interfaces", "Virtual Reality", "Humans", "Space Technology", "Space Exploration", "Image Reconstruction", "Context Modeling", "Shape", "Software Libraries", "Sketch Interaction", "In Place Augmented Reality Sketching", "Tangible Interaction", "User Interface" ], "authors": [ { "affiliation": "The Visual Media Lab, Ben-Gurion University, Beer-Sheva, Israel", "fullName": "Nate Hagbi", "givenName": "Nate", "surname": "Hagbi", "__typename": "ArticleAuthorType" }, { "affiliation": "The HIT Lab NZ, University of Canterbury, New Zealand", "fullName": "Raphaël Grasset", "givenName": "Raphaël", "surname": "Grasset", "__typename": "ArticleAuthorType" }, { "affiliation": "The Visual Media Lab, Ben-Gurion University, Beer-Sheva, Israel", "fullName": "Oriel Bergig", "givenName": "Oriel", "surname": "Bergig", "__typename": "ArticleAuthorType" }, { "affiliation": "The HIT Lab NZ, University of Canterbury, New Zealand", "fullName": "Mark Billinghurst", "givenName": "Mark", "surname": "Billinghurst", "__typename": "ArticleAuthorType" }, { "affiliation": "The Visual Media Lab, Ben-Gurion University, Beer-Sheva, Israel", "fullName": "Jihad El-Sana", "givenName": "Jihad", "surname": "El-Sana", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, 
"pubDate": "2010-03-01T00:00:00", "pubType": "proceedings", "pages": "91-94", "year": "2010", "issn": "1087-8270", "isbn": "978-1-4244-6237-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05444809", "articleId": "12OmNx2QUIE", "__typename": "AdjacentArticleType" }, "next": { "fno": "05444807", "articleId": "12OmNx4yvDy", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icsc/2016/0662/0/0662a358", "title": "Mobile Augmented Reality Authoring Tool", "doi": null, "abstractUrl": "/proceedings-article/icsc/2016/0662a358/12OmNAXglVC", "parentPublication": { "id": "proceedings/icsc/2016/0662/0", "title": "2016 IEEE Tenth International Conference on Semantic Computing (ICSC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icalt/2010/4055/0/4055a422", "title": "Learning Words Using Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/icalt/2010/4055a422/12OmNBpEeQM", "parentPublication": { "id": "proceedings/icalt/2010/4055/0", "title": "Advanced Learning Technologies, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/uic-atc/2012/4843/0/4843a822", "title": "Enhancing Traditional Games with Augmented Reality Technologies", "doi": null, "abstractUrl": "/proceedings-article/uic-atc/2012/4843a822/12OmNCmGNYK", "parentPublication": { "id": "proceedings/uic-atc/2012/4843/0", "title": "Ubiquitous, Autonomic and Trusted Computing, Symposia and Workshops on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2009/5390/0/05336490", "title": "In-place 3D sketching for authoring and augmenting mechanical systems", "doi": null, "abstractUrl": "/proceedings-article/ismar/2009/05336490/12OmNwCJOQP", "parentPublication": { 
"id": "proceedings/ismar/2009/5390/0", "title": "2009 8th IEEE International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2010/9343/0/05643593", "title": "Augmented reality for board games", "doi": null, "abstractUrl": "/proceedings-article/ismar/2010/05643593/12OmNxj23c6", "parentPublication": { "id": "proceedings/ismar/2010/9343/0", "title": "2010 IEEE International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mue/2007/2777/0/04197393", "title": "System for Creating Games in Augmented Environments", "doi": null, "abstractUrl": "/proceedings-article/mue/2007/04197393/12OmNxuXcC3", "parentPublication": { "id": "proceedings/mue/2007/2777/0", "title": "2007 International Conference on Multimedia and Ubiquitous Engineering (MUE'07)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2009/3791/0/3791a312", "title": "Designing Augmented Reality Games for Mobile Learning Using an Instructional-Motivational Paradigm", "doi": null, "abstractUrl": "/proceedings-article/cw/2009/3791a312/12OmNzUPpdf", "parentPublication": { "id": "proceedings/cw/2009/3791/0", "title": "2009 International Conference on CyberWorlds", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2008/04/mcg2008040040", "title": "Toward Next-Gen Mobile AR Games", "doi": null, "abstractUrl": "/magazine/cg/2008/04/mcg2008040040/13rRUxASujW", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iceee/2019/3910/0/391000a079", "title": "Desktop Artillery Simulation Using Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/iceee/2019/391000a079/1cpqGEpXo5O", 
"parentPublication": { "id": "proceedings/iceee/2019/3910/0", "title": "2019 6th International Conference on Electrical and Electronics Engineering (ICEEE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2019/2297/0/229700a407", "title": "How does Augmented Reality Improve the Play Experience in Current Augmented Reality Enhanced Smartphone Games?", "doi": null, "abstractUrl": "/proceedings-article/cw/2019/229700a407/1fHkpdxeyWI", "parentPublication": { "id": "proceedings/cw/2019/2297/0", "title": "2019 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNx7ouU1", "title": "2010 International Conference on Cyberworlds", "acronym": "cw", "groupId": "1000175", "volume": "0", "displayVolume": "0", "year": "2010", "__typename": "ProceedingType" }, "article": { "id": "12OmNz2C1zv", "doi": "10.1109/CW.2010.23", "title": "Reference Plane Assisted Sketching Interface for 3D Freeform Shape Design", "normalizedTitle": "Reference Plane Assisted Sketching Interface for 3D Freeform Shape Design", "abstract": "This paper presents a sketch-based modeling system with auxiliary planes as references for 3D freeform shape design. The user first creates a rough 3D model of arbitrary topology by sketching some contours of the model. Then the user can use sketching to perform deformation, extrusion, etc, to edit the model. To regularize and interpret the user's inputs properly, we introduce some rules for the strokes into the system, which are based on both the semantic meaning of the sketched strokes and human psychology. Unlike other sketching systems, all the creation and editing operations in the presented system are performed with reference to some auxiliary planes that are automatically constructed based on the user’s sketches or default settings. The use of reference planes provides a heuristic solution to the problem of ambiguity of 2D interface for modeling in 3D space. Examples demonstrate that the presented system can allow the user to intuitively and intelligently create and edit 3D models even with complex topology, which is usually difficult in other similar sketch-based modeling systems.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents a sketch-based modeling system with auxiliary planes as references for 3D freeform shape design. The user first creates a rough 3D model of arbitrary topology by sketching some contours of the model. Then the user can use sketching to perform deformation, extrusion, etc, to edit the model. 
To regularize and interpret the user's inputs properly, we introduce some rules for the strokes into the system, which are based on both the semantic meaning of the sketched strokes and human psychology. Unlike other sketching systems, all the creation and editing operations in the presented system are performed with reference to some auxiliary planes that are automatically constructed based on the user’s sketches or default settings. The use of reference planes provides a heuristic solution to the problem of ambiguity of 2D interface for modeling in 3D space. Examples demonstrate that the presented system can allow the user to intuitively and intelligently create and edit 3D models even with complex topology, which is usually difficult in other similar sketch-based modeling systems.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents a sketch-based modeling system with auxiliary planes as references for 3D freeform shape design. The user first creates a rough 3D model of arbitrary topology by sketching some contours of the model. Then the user can use sketching to perform deformation, extrusion, etc, to edit the model. To regularize and interpret the user's inputs properly, we introduce some rules for the strokes into the system, which are based on both the semantic meaning of the sketched strokes and human psychology. Unlike other sketching systems, all the creation and editing operations in the presented system are performed with reference to some auxiliary planes that are automatically constructed based on the user’s sketches or default settings. The use of reference planes provides a heuristic solution to the problem of ambiguity of 2D interface for modeling in 3D space. 
Examples demonstrate that the presented system can allow the user to intuitively and intelligently create and edit 3D models even with complex topology, which is usually difficult in other similar sketch-based modeling systems.", "fno": "4215a105", "keywords": [ "Sketch Based Interface And Modeling", "Interaction Techniques", "Freeform Shape Design", "Deformations", "Reference Planes" ], "authors": [ { "affiliation": null, "fullName": "Kai Wang", "givenName": "Kai", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jianmin Zheng", "givenName": "Jianmin", "surname": "Zheng", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Hock-Soon Seah", "givenName": "Hock-Soon", "surname": "Seah", "__typename": "ArticleAuthorType" } ], "idPrefix": "cw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2010-10-01T00:00:00", "pubType": "proceedings", "pages": "105-112", "year": "2010", "issn": null, "isbn": "978-0-7695-4215-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4215a097", "articleId": "12OmNzYwcdu", "__typename": "AdjacentArticleType" }, "next": { "fno": "4215a113", "articleId": "12OmNzahc4Q", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/sibgrapi/2012/4829/0/4829a150", "title": "Rapid Visualization of Geological Concepts", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2012/4829a150/12OmNCdBDWk", "parentPublication": { "id": "proceedings/sibgrapi/2012/4829/0", "title": "2012 25th SIBGRAPI Conference on Graphics, Patterns and Images", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csse/2008/3336/2/3336d049", "title": "An Interactive Sketching Modeling System: Sketch3D", "doi": null, "abstractUrl": 
"/proceedings-article/csse/2008/3336d049/12OmNroij4Y", "parentPublication": { "id": "proceedings/csse/2008/3336/6", "title": "Computer Science and Software Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2010/7029/0/05543514", "title": "An integrated image and sketching environment for archaeological sites", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2010/05543514/12OmNwnYG1F", "parentPublication": { "id": "proceedings/cvprw/2010/7029/0", "title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/gmag/2003/1985/0/19850188", "title": "FreeSculptor: A Computer-Aided Freeform Design Environment", "doi": null, "abstractUrl": "/proceedings-article/gmag/2003/19850188/12OmNzwHvnx", "parentPublication": { "id": "proceedings/gmag/2003/1985/0", "title": "Geometric Modeling and Graphics, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/10/08421591", "title": "Model-Guided 3D Sketching", "doi": null, "abstractUrl": "/journal/tg/2019/10/08421591/13rRUEgs2Mb", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/04/ttg2008040835", "title": "Scientific Sketching for Collaborative VR Visualization Design", "doi": null, "abstractUrl": "/journal/tg/2008/04/ttg2008040835/13rRUwI5UfX", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2018/7315/0/731500a066", "title": "Enhancing Sketching and Sculpting for Shape Modeling", "doi": null, 
"abstractUrl": "/proceedings-article/cw/2018/731500a066/17D45WWzW7i", "parentPublication": { "id": "proceedings/cw/2018/7315/0", "title": "2018 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10049645", "title": "GestureSurface: VR Sketching through Assembling Scaffold Surface with Non-Dominant Hand", "doi": null, "abstractUrl": "/journal/tg/2023/05/10049645/1KYoyLX55fy", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a826", "title": "Mid-Air Finger Sketching for Tree Modeling", "doi": null, "abstractUrl": "/proceedings-article/vr/2021/255600a826/1tuBbGEUWm4", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800a329", "title": "BuildingSketch: Freehand Mid-Air Sketching for Building Modeling", "doi": null, "abstractUrl": "/proceedings-article/ismar/2021/015800a329/1yeCWcklIfm", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1MNgk3BHlS0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2023", "__typename": "ProceedingType" }, "article": { "id": "1MNgJRvtdtu", "doi": "10.1109/VR55154.2023.00052", "title": "WARPY: Sketching Environment-Aware 3D Curves in Mobile Augmented Reality", "normalizedTitle": "WARPY: Sketching Environment-Aware 3D Curves in Mobile Augmented Reality", "abstract": "Three-dimensional curve drawing in Augmented Reality (AR) enables users to create 3D curves that fit within the real-world scene. It has applications in 3D design, sculpting, and animation. However, the task complexity increases when the desirable path for the curve is obstructed by the physical environment or by what the camera can see. For example, it is difficult to draw a curve that wraps around an object or scales to out-of-reach places. We propose WARPY, an environment-aware 3D curve drawing tool for mobile AR. Our system enables users to draw freeform curves from a distance in AR by combining 2D-to-3D sketch inference with geometric proxies. Geometric Proxies can be obtained via 3D scanning or from a list of pre-defined primitives. WARPY also provides a multi-view mode to enable users to sketch a curve from multiple viewpoints, which is useful if the target curve cannot fit within the camera&#x0027;s field of view. We conducted two user studies and found that WARPY can be a viable tool to help users create complex and large curves in AR.", "abstracts": [ { "abstractType": "Regular", "content": "Three-dimensional curve drawing in Augmented Reality (AR) enables users to create 3D curves that fit within the real-world scene. It has applications in 3D design, sculpting, and animation. However, the task complexity increases when the desirable path for the curve is obstructed by the physical environment or by what the camera can see. 
For example, it is difficult to draw a curve that wraps around an object or scales to out-of-reach places. We propose WARPY, an environment-aware 3D curve drawing tool for mobile AR. Our system enables users to draw freeform curves from a distance in AR by combining 2D-to-3D sketch inference with geometric proxies. Geometric Proxies can be obtained via 3D scanning or from a list of pre-defined primitives. WARPY also provides a multi-view mode to enable users to sketch a curve from multiple viewpoints, which is useful if the target curve cannot fit within the camera&#x0027;s field of view. We conducted two user studies and found that WARPY can be a viable tool to help users create complex and large curves in AR.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Three-dimensional curve drawing in Augmented Reality (AR) enables users to create 3D curves that fit within the real-world scene. It has applications in 3D design, sculpting, and animation. However, the task complexity increases when the desirable path for the curve is obstructed by the physical environment or by what the camera can see. For example, it is difficult to draw a curve that wraps around an object or scales to out-of-reach places. We propose WARPY, an environment-aware 3D curve drawing tool for mobile AR. Our system enables users to draw freeform curves from a distance in AR by combining 2D-to-3D sketch inference with geometric proxies. Geometric Proxies can be obtained via 3D scanning or from a list of pre-defined primitives. WARPY also provides a multi-view mode to enable users to sketch a curve from multiple viewpoints, which is useful if the target curve cannot fit within the camera's field of view. 
We conducted two user studies and found that WARPY can be a viable tool to help users create complex and large curves in AR.", "fno": "481500a367", "keywords": [ "Geometry", "Three Dimensional Displays", "Spirals", "Shape", "User Interfaces", "Cameras", "Animation" ], "authors": [ { "affiliation": "George Mason University", "fullName": "Rawan Alghofaili", "givenName": "Rawan", "surname": "Alghofaili", "__typename": "ArticleAuthorType" }, { "affiliation": "Adobe Research", "fullName": "Cuong Nguyen", "givenName": "Cuong", "surname": "Nguyen", "__typename": "ArticleAuthorType" }, { "affiliation": "Adobe Research", "fullName": "Vojtĕch Krs", "givenName": "Vojtĕch", "surname": "Krs", "__typename": "ArticleAuthorType" }, { "affiliation": "Adobe Research", "fullName": "Nathan Carr", "givenName": "Nathan", "surname": "Carr", "__typename": "ArticleAuthorType" }, { "affiliation": "Adobe Research", "fullName": "Radomír Mĕch", "givenName": "Radomír", "surname": "Mĕch", "__typename": "ArticleAuthorType" }, { "affiliation": "George Mason University", "fullName": "Lap-Fai Yu", "givenName": "Lap-Fai", "surname": "Yu", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2023-03-01T00:00:00", "pubType": "proceedings", "pages": "367-377", "year": "2023", "issn": null, "isbn": "979-8-3503-4815-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1MNgJxvSWrK", "name": "pvr202348150-010108496s1-mm_481500a367.zip", "size": "237 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pvr202348150-010108496s1-mm_481500a367.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "481500a356", "articleId": "1MNgROnDNsY", "__typename": "AdjacentArticleType" }, "next": { "fno": "481500a378", "articleId": "1MNgGafxH4Q", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, 
"recommendedArticles": [ { "id": "proceedings/ismarw/2016/3740/0/07836502", "title": "PoLAR: A Portable Library for Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836502/12OmNAoDhRV", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2010/6846/0/05444714", "title": "AR-Mote: A wireless device for Augmented Reality environment", "doi": null, "abstractUrl": "/proceedings-article/3dui/2010/05444714/12OmNviZlmq", "parentPublication": { "id": "proceedings/3dui/2010/6846/0", "title": "2010 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iswc/2001/1318/0/13180031", "title": "Tinmith-Metro: New Outdoor Techniques for Creating City Models with an Augmented Reality Wearable Computer", "doi": null, "abstractUrl": "/proceedings-article/iswc/2001/13180031/12OmNvlPkGK", "parentPublication": { "id": "proceedings/iswc/2001/1318/0", "title": "Proceedings Fifth International Symposium on Wearable Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2008/1971/0/04480755", "title": "Augmented Reality for Industrial Building Acceptance", "doi": null, "abstractUrl": "/proceedings-article/vr/2008/04480755/12OmNwc3wyn", "parentPublication": { "id": "proceedings/vr/2008/1971/0", "title": "IEEE Virtual Reality 2008", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2010/6237/0/05444806", "title": "In-Place Sketching for content authoring in Augmented Reality games", "doi": null, "abstractUrl": "/proceedings-article/vr/2010/05444806/12OmNxveNJV", "parentPublication": { "id": "proceedings/vr/2010/6237/0", "title": "2010 IEEE Virtual 
Reality Conference (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2008/1971/0/04480808", "title": "Creating Meaningful Environment Models for Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2008/04480808/12OmNzYwc7n", "parentPublication": { "id": "proceedings/vr/2008/1971/0", "title": "IEEE Virtual Reality 2008", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/ic/2013/06/mic2013060066", "title": "Augmented Reality Interfaces", "doi": null, "abstractUrl": "/magazine/ic/2013/06/mic2013060066/13rRUIJcWhZ", "parentPublication": { "id": "mags/ic", "title": "IEEE Internet Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2002/07/i0962", "title": "Digital Curves in 3D Space and a Linear-Time Length Estimation Algorithm", "doi": null, "abstractUrl": "/journal/tp/2002/07/i0962/13rRUwI5TYw", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a662", "title": "A Tangible Augmented Reality Programming Learning Environment for Textual Languages", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a662/1CJd5Up2emQ", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2020/7675/0/767500a067", "title": "Industrial Augmented Reality: Concepts and User Interface Designs for Augmented Reality Maintenance Worker Support Systems", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a067/1pBMhXqBhCM", "parentPublication": { "id": 
"proceedings/ismar-adjunct/2020/7675/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1qpzz6dhLLq", "title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "acronym": "aivr", "groupId": "1830004", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1qpzBgQPoWI", "doi": "10.1109/AIVR50618.2020.00077", "title": "3D Model Retrieval Using Constructive Solid Geometry in Virtual Reality", "normalizedTitle": "3D Model Retrieval Using Constructive Solid Geometry in Virtual Reality", "abstract": "3D models play an increasingly important role in various areas, ranging from engineering to the cultural heritage domain. Therefore, tools to effectively and efficiently manage, explore and search large 3D model collections have become more important over the years. Most solutions so far use conventional 2D user interfaces and interaction relies on mouse and keyboard input. In this paper, we present vitrivr-VR, a 3D model retrieval system featuring a virtual reality (VR) user interface based on the multimedia search system vitrivr. Query formulation and results presentation takes place in a VR environment, in which users immerse themselves. vitrivr-VR enables the sculpting of 3D models through constructive solid geometry (CSG) using a VR controller and the use of these sculpted objects as query objects. To the best of our knowledge, vitrivr-VR is the first system that combines CSG and VR to enable 3D model retrieval.", "abstracts": [ { "abstractType": "Regular", "content": "3D models play an increasingly important role in various areas, ranging from engineering to the cultural heritage domain. Therefore, tools to effectively and efficiently manage, explore and search large 3D model collections have become more important over the years. Most solutions so far use conventional 2D user interfaces and interaction relies on mouse and keyboard input. 
In this paper, we present vitrivr-VR, a 3D model retrieval system featuring a virtual reality (VR) user interface based on the multimedia search system vitrivr. Query formulation and results presentation takes place in a VR environment, in which users immerse themselves. vitrivr-VR enables the sculpting of 3D models through constructive solid geometry (CSG) using a VR controller and the use of these sculpted objects as query objects. To the best of our knowledge, vitrivr-VR is the first system that combines CSG and VR to enable 3D model retrieval.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "3D models play an increasingly important role in various areas, ranging from engineering to the cultural heritage domain. Therefore, tools to effectively and efficiently manage, explore and search large 3D model collections have become more important over the years. Most solutions so far use conventional 2D user interfaces and interaction relies on mouse and keyboard input. In this paper, we present vitrivr-VR, a 3D model retrieval system featuring a virtual reality (VR) user interface based on the multimedia search system vitrivr. Query formulation and results presentation takes place in a VR environment, in which users immerse themselves. vitrivr-VR enables the sculpting of 3D models through constructive solid geometry (CSG) using a VR controller and the use of these sculpted objects as query objects. 
To the best of our knowledge, vitrivr-VR is the first system that combines CSG and VR to enable 3D model retrieval.", "fno": "746300a373", "keywords": [ "Graphical User Interfaces", "Multimedia Systems", "Query Processing", "Solid Modelling", "Virtual Reality", "Keyboard Input", "Vitrivr VR", "3 D Model Retrieval System", "Virtual Reality User Interface", "Multimedia Search System Vitrivr", "Constructive Solid Geometry", "VR Controller", "2 D User Interfaces", "Mouse", "Sculpted Objects", "Query Objects", "CSG", "Solid Modeling", "Three Dimensional Displays", "Virtual Reality", "User Interfaces", "Geometry", "Computational Modeling", "Multimedia Databases", "3 D Model Retrieval", "Virtual Reality", "Constructive Solid Geometry", "Multimedia Retrieval", "Open Source" ], "authors": [ { "affiliation": "University of Basel,Basel,Switzerland", "fullName": "Samuel Börlin", "givenName": "Samuel", "surname": "Börlin", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Basel,Basel,Switzerland", "fullName": "Ralph Gasser", "givenName": "Ralph", "surname": "Gasser", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Basel,Basel,Switzerland", "fullName": "Florian Spiess", "givenName": "Florian", "surname": "Spiess", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Basel,Basel,Switzerland", "fullName": "Heiko Schuldt", "givenName": "Heiko", "surname": "Schuldt", "__typename": "ArticleAuthorType" } ], "idPrefix": "aivr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-12-01T00:00:00", "pubType": "proceedings", "pages": "373-374", "year": "2020", "issn": null, "isbn": "978-1-7281-7463-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "746300a371", "articleId": "1qpzCZXhpS0", "__typename": "AdjacentArticleType" }, "next": { "fno": "746300a375", "articleId": "1qpzDhur636", "__typename": 
"AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cw/2014/4677/0/4677a063", "title": "Constructive Roof Geometry", "doi": null, "abstractUrl": "/proceedings-article/cw/2014/4677a063/12OmNBlXs1W", "parentPublication": { "id": "proceedings/cw/2014/4677/0", "title": "2014 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892357", "title": "View-aware tile-based adaptations in 360 virtual reality video streaming", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892357/12OmNqJZgGI", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cisis/2009/3575/0/3575a162", "title": "Reconstruction for Artificial Degraded Image Using Constructive Solid Geometry and Strongly Typed Genetic Programming", "doi": null, "abstractUrl": "/proceedings-article/cisis/2009/3575a162/12OmNrYlmM1", "parentPublication": { "id": "proceedings/cisis/2009/3575/0", "title": "2009 International Conference on Complex, Intelligent and Software Intensive Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2008/2047/0/04476588", "title": "Tech Note: Digital Foam", "doi": null, "abstractUrl": "/proceedings-article/3dui/2008/04476588/12OmNwuvrVb", "parentPublication": { "id": "proceedings/3dui/2008/2047/0", "title": "2008 IEEE Symposium on 3D User Interfaces", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pg/2001/1227/0/12270254", "title": "Haptic Sculpting of Volumetric Implicit Functions", "doi": null, "abstractUrl": "/proceedings-article/pg/2001/12270254/12OmNzV70HP", "parentPublication": { "id": "proceedings/pg/2001/1227/0", "title": "Computer Graphics and 
Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892345", "title": "Anatomy builder VR: Applying a constructive learning method in the virtual reality canine skeletal system", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892345/12OmNzd7bOL", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/1986/09/mcg1986090029", "title": "Depth-Buffering Display Techniques for Constructive Solid Geometry", "doi": null, "abstractUrl": "/magazine/cg/1986/09/mcg1986090029/13rRUIJuxxG", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/mu/2017/02/mmu2017020104", "title": "A Light-Field Journey to Virtual Reality", "doi": null, "abstractUrl": "/magazine/mu/2017/02/mmu2017020104/13rRUy08MBf", "parentPublication": { "id": "mags/mu", "title": "IEEE MultiMedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/imet/2022/7016/0/09929445", "title": "Explorative Study on Asymmetric Sketch Interactions for Object Retrieval in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/imet/2022/09929445/1HYuRB6nAty", "parentPublication": { "id": "proceedings/imet/2022/7016/0", "title": "2022 International Conference on Interactive Media, Smart Systems and Emerging Technologies (IMET)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/05/09293398", "title": "Neural Shape Parsers for Constructive Solid Geometry", "doi": null, "abstractUrl": "/journal/tp/2022/05/09293398/1pyoijUcZZ6", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine 
Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNySXF3j", "title": "2015 5th International Conference on IT Convergence and Security (ICITCS)", "acronym": "icitcs", "groupId": "1803418", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNBLdKIv", "doi": "10.1109/ICITCS.2015.7292957", "title": "Background Subtraction from Auto-Exposure Surveillance Camera in Restaurant", "normalizedTitle": "Background Subtraction from Auto-Exposure Surveillance Camera in Restaurant", "abstract": "This paper introduces an approach to background subtraction from auto-exposure surveillance camera. The proposed method adapts the concept of codebook while the original codebook has a limitation in auto-exposure camera. These videos have light adjustment over time and it leads to brightness variation, and the variation could occur globally or locally. The proposed method uses the advantage of YCbCr color space and adjusts the criteria for the segmentation in the original codebook. Moreover, the proposed method can process the input video in real time and can reduce noises due to illumination change. The experimental results show that the proposed method can reduce a lot of noises in the video, and also have a better accuracy that the original codebook method.", "abstracts": [ { "abstractType": "Regular", "content": "This paper introduces an approach to background subtraction from auto-exposure surveillance camera. The proposed method adapts the concept of codebook while the original codebook has a limitation in auto-exposure camera. These videos have light adjustment over time and it leads to brightness variation, and the variation could occur globally or locally. The proposed method uses the advantage of YCbCr color space and adjusts the criteria for the segmentation in the original codebook. Moreover, the proposed method can process the input video in real time and can reduce noises due to illumination change. 
The experimental results show that the proposed method can reduce a lot of noises in the video, and also have a better accuracy that the original codebook method.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper introduces an approach to background subtraction from auto-exposure surveillance camera. The proposed method adapts the concept of codebook while the original codebook has a limitation in auto-exposure camera. These videos have light adjustment over time and it leads to brightness variation, and the variation could occur globally or locally. The proposed method uses the advantage of YCbCr color space and adjusts the criteria for the segmentation in the original codebook. Moreover, the proposed method can process the input video in real time and can reduce noises due to illumination change. The experimental results show that the proposed method can reduce a lot of noises in the video, and also have a better accuracy that the original codebook method.", "fno": "07292957", "keywords": [ "Videos", "Brightness", "Lighting", "Cameras", "Image Color Analysis", "Noise", "Image Segmentation" ], "authors": [ { "affiliation": null, "fullName": "Boonyakorn Jantaranuson", "givenName": "Boonyakorn", "surname": "Jantaranuson", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Nagul Cooharojananone", "givenName": "Nagul", "surname": "Cooharojananone", "__typename": "ArticleAuthorType" } ], "idPrefix": "icitcs", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-08-01T00:00:00", "pubType": "proceedings", "pages": "1-5", "year": "2015", "issn": null, "isbn": "978-1-4673-6537-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07292956", "articleId": "12OmNy7Qfum", "__typename": "AdjacentArticleType" }, "next": { "fno": "07292958", "articleId": "12OmNxvwoWB", "__typename": 
"AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cis/2014/7434/0/7434a704", "title": "A Codebook Based Background Subtraction Method for Image Defects Detection", "doi": null, "abstractUrl": "/proceedings-article/cis/2014/7434a704/12OmNAFWOR2", "parentPublication": { "id": "proceedings/cis/2014/7434/0", "title": "2014 Tenth International Conference on Computational Intelligence and Security (CIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sitis/2014/7978/0/7978a119", "title": "Foreground-Background Segmentation Based on Codebook and Edge Detector", "doi": null, "abstractUrl": "/proceedings-article/sitis/2014/7978a119/12OmNAJ4piS", "parentPublication": { "id": "proceedings/sitis/2014/7978/0", "title": "2014 Tenth International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2016/5407/0/5407a314", "title": "HDRFusion: HDR SLAM Using a Low-Cost Auto-Exposure RGB-D Sensor", "doi": null, "abstractUrl": "/proceedings-article/3dv/2016/5407a314/12OmNwK7obN", "parentPublication": { "id": "proceedings/3dv/2016/5407/0", "title": "2016 Fourth International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2011/289/1/05750611", "title": "The Soccer Robot the Auto-adapted Threshold Value Method Based on HSI and RGB", "doi": null, "abstractUrl": "/proceedings-article/icicta/2011/05750611/12OmNwwMf1h", "parentPublication": { "id": null, "title": null, "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/avss/2014/4871/0/06918678", "title": "A spatiotemporal background extractor using a single-layer codebook model", "doi": null, "abstractUrl": "/proceedings-article/avss/2014/06918678/12OmNxH9X8z", 
"parentPublication": { "id": "proceedings/avss/2014/4871/0", "title": "2014 International Conference on Advanced Video and Signal Based Surveillance (AVSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2011/0063/0/06130461", "title": "Multi-scale multi-feature codebook-based background subtraction", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2011/06130461/12OmNyRxFwU", "parentPublication": { "id": "proceedings/iccvw/2011/0063/0", "title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sitis/2015/9721/0/9721a110", "title": "An Adaptive Codebook Model for Change Detection with Dynamic Background", "doi": null, "abstractUrl": "/proceedings-article/sitis/2015/9721a110/12OmNzTH0Rc", "parentPublication": { "id": "proceedings/sitis/2015/9721/0", "title": "2015 11th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093643", "title": "An Extended Exposure Fusion and its Application to Single Image Contrast Enhancement", "doi": null, "abstractUrl": "/proceedings-article/wacv/2020/09093643/1jPbwJf3Ta0", "parentPublication": { "id": "proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900h706", "title": "Neural Auto-Exposure for High-Dynamic Range Object Detection", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900h706/1yeJuGu5Xvq", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900k0566", "title": "Auto-Exposure Fusion for Single-Image Shadow Removal", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900k0566/1yeKp9CxQYM", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxH9X7L", "title": "2014 Canadian Conference on Computer and Robot Vision (CRV)", "acronym": "crv", "groupId": "1001794", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNBOlllb", "doi": "10.1109/CRV.2014.8", "title": "Generalized Exposure Fusion Weights Estimation", "normalizedTitle": "Generalized Exposure Fusion Weights Estimation", "abstract": "Only a small part of the large intensities interval found in high dynamic range scenes can be captured with usual image sensors. This is why delivered images may contain under or overexposed pixels. A popular approach to overcome this problem is to take several images using different exposure parameters, and then fuse them into one single image. This exposure fusion is mostly performed as a weighted average between the corresponding pixels. The challenge is to find weights that produce best fused image quality and in a minimum amount of operations to meet real time requirements. In this paper we present a supervised learning method to estimate generalized exposure fusion weights and we demonstrate how they can be used to fuse any exposures very fast. Subjective and objective comparisons with some relevant works are conducted to prove the effectiveness of the proposed method.", "abstracts": [ { "abstractType": "Regular", "content": "Only a small part of the large intensities interval found in high dynamic range scenes can be captured with usual image sensors. This is why delivered images may contain under or overexposed pixels. A popular approach to overcome this problem is to take several images using different exposure parameters, and then fuse them into one single image. This exposure fusion is mostly performed as a weighted average between the corresponding pixels. The challenge is to find weights that produce best fused image quality and in a minimum amount of operations to meet real time requirements. 
In this paper we present a supervised learning method to estimate generalized exposure fusion weights and we demonstrate how they can be used to fuse any exposures very fast. Subjective and objective comparisons with some relevant works are conducted to prove the effectiveness of the proposed method.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Only a small part of the large intensities interval found in high dynamic range scenes can be captured with usual image sensors. This is why delivered images may contain under or overexposed pixels. A popular approach to overcome this problem is to take several images using different exposure parameters, and then fuse them into one single image. This exposure fusion is mostly performed as a weighted average between the corresponding pixels. The challenge is to find weights that produce best fused image quality and in a minimum amount of operations to meet real time requirements. In this paper we present a supervised learning method to estimate generalized exposure fusion weights and we demonstrate how they can be used to fuse any exposures very fast. 
Subjective and objective comparisons with some relevant works are conducted to prove the effectiveness of the proposed method.", "fno": "4337a071", "keywords": [ "Estimation", "Fuses", "Dynamic Range", "Image Quality", "Supervised Learning", "Cameras", "Image Fusion", "Weighting Function", "High Dynamic Range Scenes", "Exposure Fusion" ], "authors": [ { "affiliation": null, "fullName": "Mohammed Elamine Moumene", "givenName": "Mohammed Elamine", "surname": "Moumene", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Rachid Nourine", "givenName": "Rachid", "surname": "Nourine", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Djemel Ziou", "givenName": "Djemel", "surname": "Ziou", "__typename": "ArticleAuthorType" } ], "idPrefix": "crv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-05-01T00:00:00", "pubType": "proceedings", "pages": "71-76", "year": "2014", "issn": null, "isbn": "978-1-4799-4337-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4337a063", "articleId": "12OmNxVlTGA", "__typename": "AdjacentArticleType" }, "next": { "fno": "4337a077", "articleId": "12OmNrYCXVo", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/pg/2007/3009/0/30090382", "title": "Exposure Fusion", "doi": null, "abstractUrl": "/proceedings-article/pg/2007/30090382/12OmNx4gUxY", "parentPublication": { "id": "proceedings/pg/2007/3009/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032e724", "title": "DeepFuse: A Deep Unsupervised Approach for Exposure Fusion with Extreme Exposure Image Pairs", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032e724/12OmNxQOjBX", 
"parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2017/6067/0/08019529", "title": "Multi-scale exposure fusion via gradient domain guided image filtering", "doi": null, "abstractUrl": "/proceedings-article/icme/2017/08019529/12OmNxw5Bam", "parentPublication": { "id": "proceedings/icme/2017/6067/0", "title": "2017 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/psivt/2010/4285/0/4285a501", "title": "Subband Architecture Based Exposure Fusion", "doi": null, "abstractUrl": "/proceedings-article/psivt/2010/4285a501/12OmNy3RRLg", "parentPublication": { "id": "proceedings/psivt/2010/4285/0", "title": "Image and Video Technology, Pacific-Rim Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icnlp/2022/9544/0/954400a195", "title": "Attention Guided Network for Multi Exposure Image Fusion", "doi": null, "abstractUrl": "/proceedings-article/icnlp/2022/954400a195/1GNtgEFW0w0", "parentPublication": { "id": "proceedings/icnlp/2022/9544/0", "title": "2022 4th International Conference on Natural Language Processing (ICNLP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icftic/2022/2195/0/10075235", "title": "An efficient multi-exposure image fusion algorithm", "doi": null, "abstractUrl": "/proceedings-article/icftic/2022/10075235/1LRlkJmXwJO", "parentPublication": { "id": "proceedings/icftic/2022/2195/0", "title": "2022 4th International Conference on Frontiers Technology of Information and Computer (ICFTIC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csci/2018/1360/0/136000a417", "title": "Densely Connected 
Convolutional Networks for Multi-Exposure Fusion", "doi": null, "abstractUrl": "/proceedings-article/csci/2018/136000a417/1gjRwnmVemY", "parentPublication": { "id": "proceedings/csci/2018/1360/0", "title": "2018 International Conference on Computational Science and Computational Intelligence (CSCI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093643", "title": "An Extended Exposure Fusion and its Application to Single Image Contrast Enhancement", "doi": null, "abstractUrl": "/proceedings-article/wacv/2020/09093643/1jPbwJf3Ta0", "parentPublication": { "id": "proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/01/09151265", "title": "U2Fusion: A Unified Unsupervised Image Fusion Network", "doi": null, "abstractUrl": "/journal/tp/2022/01/09151265/1lPClHxbHEc", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900k0566", "title": "Auto-Exposure Fusion for Single-Image Shadow Removal", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900k0566/1yeKp9CxQYM", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyUWQQv", "title": "2003 International Conference on Multimedia and Expo. ICME '03. Proceedings (Cat. No.03TH8698)", "acronym": "icme", "groupId": "1000477", "volume": "1", "displayVolume": "1", "year": "2003", "__typename": "ProceedingType" }, "article": { "id": "12OmNC1Y5nO", "doi": "10.1109/ICME.2003.1220976", "title": "Image quality improvement by adaptive exposure correction techniques", "normalizedTitle": "Image quality improvement by adaptive exposure correction techniques", "abstract": "The proposed paper concerns the processing of images in digital format and, more specifically, particular techniques that can be advantageously used in digital still cameras for improving the quality of images acquired with a non-optimal exposure. The proposed approach analyses the CCD/CMOS sensor Bayer data or the corresponding color generated image and, after identifying specific features, it adjusts the exposure level according to a 'camera response' like function.", "abstracts": [ { "abstractType": "Regular", "content": "The proposed paper concerns the processing of images in digital format and, more specifically, particular techniques that can be advantageously used in digital still cameras for improving the quality of images acquired with a non-optimal exposure. The proposed approach analyses the CCD/CMOS sensor Bayer data or the corresponding color generated image and, after identifying specific features, it adjusts the exposure level according to a 'camera response' like function.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The proposed paper concerns the processing of images in digital format and, more specifically, particular techniques that can be advantageously used in digital still cameras for improving the quality of images acquired with a non-optimal exposure. 
The proposed approach analyses the CCD/CMOS sensor Bayer data or the corresponding color generated image and, after identifying specific features, it adjusts the exposure level according to a 'camera response' like function.", "fno": "7965549", "keywords": [], "authors": [ { "affiliation": "STMicroelectronics, Catania, Italy", "fullName": "G. Messina", "givenName": "G.", "surname": "Messina", "__typename": "ArticleAuthorType" }, { "affiliation": "STMicroelectronics, Catania, Italy", "fullName": "A. Castorina", "givenName": "A.", "surname": "Castorina", "__typename": "ArticleAuthorType" }, { "affiliation": "STMicroelectronics, Catania, Italy", "fullName": "S. Battiato", "givenName": "S.", "surname": "Battiato", "__typename": "ArticleAuthorType" }, { "affiliation": "STMicroelectronics, Catania, Italy", "fullName": "A. Bosco", "givenName": "A.", "surname": "Bosco", "__typename": "ArticleAuthorType" } ], "idPrefix": "icme", "isOpenAccess": false, "showRecommendedArticles": false, "showBuyMe": true, "hasPdf": true, "pubDate": "2003-07-01T00:00:00", "pubType": "proceedings", "pages": "549-552", "year": "2003", "issn": null, "isbn": "0-7803-7965-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "7965545", "articleId": "12OmNxG1yFR", "__typename": "AdjacentArticleType" }, "next": { "fno": "7965553", "articleId": "12OmNzgeLGM", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "14jQfMYohco", "title": "2018 IEEE International Conference on Multimedia and Expo (ICME)", "acronym": "icme", "groupId": "1000477", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "14jQfOq5wiI", "doi": "10.1109/ICME.2018.8486569", "title": "Image Exposure Assessment: A Benchmark and a Deep Convolutional Neural Networks Based Model", "normalizedTitle": "Image Exposure Assessment: A Benchmark and a Deep Convolutional Neural Networks Based Model", "abstract": "In the camera equipment manufacturing industry, the exposure calibration is one of the basic steps for manufacturers to consider before launching their products to the market. To this end, a method that can objectively and automatically assess the exposure levels of images taken by the camera is highly desired. However, few studies have been conducted in this area. In this paper, we attempt to solve this issue to some extent and our contributions are twofold. Firstly, in order to facilitate the study of image exposure assessment, an Image Exposure Database Z_$(IE_{ps}D)$_Z is established. In this database, there are 15, 582 images with various exposure levels, and for each image there is an associated subjective exposure score which could reflect its perceptual exposure level. Secondly, we propose a novel highly accurate DCNN-based model, namely Z_$IE_{ps}M$_Z (Image Exposure Metric), to predict the exposure level of a given image.", "abstracts": [ { "abstractType": "Regular", "content": "In the camera equipment manufacturing industry, the exposure calibration is one of the basic steps for manufacturers to consider before launching their products to the market. To this end, a method that can objectively and automatically assess the exposure levels of images taken by the camera is highly desired. However, few studies have been conducted in this area. 
In this paper, we attempt to solve this issue to some extent and our contributions are twofold. Firstly, in order to facilitate the study of image exposure assessment, an Image Exposure Database $(IE_{ps}D)$ is established. In this database, there are 15, 582 images with various exposure levels, and for each image there is an associated subjective exposure score which could reflect its perceptual exposure level. Secondly, we propose a novel highly accurate DCNN-based model, namely $IE_{ps}M$ (Image Exposure Metric), to predict the exposure level of a given image.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In the camera equipment manufacturing industry, the exposure calibration is one of the basic steps for manufacturers to consider before launching their products to the market. To this end, a method that can objectively and automatically assess the exposure levels of images taken by the camera is highly desired. However, few studies have been conducted in this area. In this paper, we attempt to solve this issue to some extent and our contributions are twofold. Firstly, in order to facilitate the study of image exposure assessment, an Image Exposure Database - is established. In this database, there are 15, 582 images with various exposure levels, and for each image there is an associated subjective exposure score which could reflect its perceptual exposure level. 
Secondly, we propose a novel highly accurate DCNN-based model, namely - (Image Exposure Metric), to predict the exposure level of a given image.", "fno": "08486569", "keywords": [ "Histograms", "Cameras", "Calibration", "Databases", "Lighting", "Benchmark Testing", "Exposure Levels", "Image Quality Assessment", "Deep Convolutional Neural Networks" ], "authors": [ { "affiliation": "School of Software Engineering, Tongji University, Shanghai, China", "fullName": "Lijun Zhang", "givenName": "Lijun", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Software Engineering, Tongji University, Shanghai, China", "fullName": "Lin Zhang", "givenName": "Lin", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Software Engineering, Tongji University, Shanghai, China", "fullName": "Xiao Liu", "givenName": "Xiao", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Software Engineering, Tongji University, Shanghai, China", "fullName": "Ying Shen", "givenName": "Ying", "surname": "Shen", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Software Engineering, Tongji University, Shanghai, China", "fullName": "Dongqing Wang", "givenName": "Dongqing", "surname": "Wang", "__typename": "ArticleAuthorType" } ], "idPrefix": "icme", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-07-01T00:00:00", "pubType": "proceedings", "pages": "1-6", "year": "2018", "issn": null, "isbn": "978-1-5386-1737-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08486601", "articleId": "14jQfNzRkmX", "__typename": "AdjacentArticleType" }, "next": { "fno": "08486592", "articleId": "14jQfNpDLGx", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ssiai/2014/4053/0/06806031", "title": 
"Low-cost camera array for mitigating lighting range effects", "doi": null, "abstractUrl": "/proceedings-article/ssiai/2014/06806031/12OmNAT0mLN", "parentPublication": { "id": "proceedings/ssiai/2014/4053/0", "title": "2014 IEEE Southwest Symposium on Image Analysis and Interpretation (SSIAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icitcs/2015/6537/0/07292957", "title": "Background Subtraction from Auto-Exposure Surveillance Camera in Restaurant", "doi": null, "abstractUrl": "/proceedings-article/icitcs/2015/07292957/12OmNBLdKIv", "parentPublication": { "id": "proceedings/icitcs/2015/6537/0", "title": "2015 5th International Conference on IT Convergence and Security (ICITCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pg/2007/3009/0/30090382", "title": "Exposure Fusion", "doi": null, "abstractUrl": "/proceedings-article/pg/2007/30090382/12OmNx4gUxY", "parentPublication": { "id": "proceedings/pg/2007/3009/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2011/348/0/06011903", "title": "A no reference metric for the quality assessment of videos affected by exposure distortion", "doi": null, "abstractUrl": "/proceedings-article/icme/2011/06011903/12OmNzTYBZ4", "parentPublication": { "id": "proceedings/icme/2011/348/0", "title": "2011 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2012/4836/0/4836a007", "title": "Real-time Continuous Geometric Calibration for Projector-Camera System under Ambient Illumination", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2012/4836a007/12OmNzahc85", "parentPublication": { "id": "proceedings/icvrv/2012/4836/0", "title": "2012 International Conference on Virtual Reality 
and Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/10/08437183", "title": "Personalized Exposure Control Using Adaptive Metering and Reinforcement Learning", "doi": null, "abstractUrl": "/journal/tg/2019/10/08437183/13rRUIM2VBP", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2019/3263/0/08747329", "title": "A Fast, Scalable, and Reliable Deghosting Method for Extreme Exposure Fusion", "doi": null, "abstractUrl": "/proceedings-article/iccp/2019/08747329/1bcJwwzmhUI", "parentPublication": { "id": "proceedings/iccp/2019/3263/0", "title": "2019 IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800b817", "title": "Learning a Reinforced Agent for Flexible Exposure Bracketing Selection", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800b817/1m3o10tCNqg", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900j153", "title": "Learning Multi-Scale Photo Exposure Correction", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900j153/1yeHUUVQqha", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900l1931", "title": "Digital Gimbal: End-to-end Deep Image Stabilization with Learnable Exposure Times", "doi": null, "abstractUrl": 
"/proceedings-article/cvpr/2021/450900l1931/1yeJTKHiG76", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1IHotVZum6Q", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "acronym": "icpr", "groupId": "9956007", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1IHouTvRibC", "doi": "10.1109/ICPR56361.2022.9956188", "title": "Active Short-Long Exposure Deblurring", "normalizedTitle": "Active Short-Long Exposure Deblurring", "abstract": "Mobile phones can capture image bursts to produce high quality still photographs. The simplest form of a burst is two frame short-long (S-L) exposure. S-L exposure is particularly suitable in low light conditions where short exposure frames are sharp but noisy and dark, and long exposure frames are affected by motion blur but have better scene chromaticity and luminance. In this work, we take a step further and define active short-long exposure deblurring where the viewfinder frames before the burst are used to optimize the S-L exposure parameters. We introduce deep architectures and data generation for active S-L exposure deblurring. The approach is experimentally validated with realistic data and it shows clear improvements. For the most difficult scenes (worst 5%) the PSNR is improved by +1.39dB.", "abstracts": [ { "abstractType": "Regular", "content": "Mobile phones can capture image bursts to produce high quality still photographs. The simplest form of a burst is two frame short-long (S-L) exposure. S-L exposure is particularly suitable in low light conditions where short exposure frames are sharp but noisy and dark, and long exposure frames are affected by motion blur but have better scene chromaticity and luminance. In this work, we take a step further and define active short-long exposure deblurring where the viewfinder frames before the burst are used to optimize the S-L exposure parameters. We introduce deep architectures and data generation for active S-L exposure deblurring. 
The approach is experimentally validated with realistic data and it shows clear improvements. For the most difficult scenes (worst 5%) the PSNR is improved by +1.39dB.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Mobile phones can capture image bursts to produce high quality still photographs. The simplest form of a burst is two frame short-long (S-L) exposure. S-L exposure is particularly suitable in low light conditions where short exposure frames are sharp but noisy and dark, and long exposure frames are affected by motion blur but have better scene chromaticity and luminance. In this work, we take a step further and define active short-long exposure deblurring where the viewfinder frames before the burst are used to optimize the S-L exposure parameters. We introduce deep architectures and data generation for active S-L exposure deblurring. The approach is experimentally validated with realistic data and it shows clear improvements. For the most difficult scenes (worst 5%) the PSNR is improved by +1.39dB.", "fno": "09956188", "keywords": [ "Image Colour Analysis", "Image Motion Analysis", "Image Restoration", "Active Short Long Exposure Deblurring", "Data Generation", "Deep Architectures", "High Quality Still Photographs", "Image Bursts", "Long Exposure Frames", "Motion Blur", "S L Exposure Parameters", "Short Exposure Frames", "Two Frame Short Long Exposure", "Viewfinder Frames", "Parameter Estimation", "Art", "Deep Architecture", "Lighting", "Imaging", "Mobile Handsets", "Pattern Recognition" ], "authors": [ { "affiliation": "Huawei Finland & Tampere University", "fullName": "Dan Yang", "givenName": "Dan", "surname": "Yang", "__typename": "ArticleAuthorType" }, { "affiliation": "Huawei Finland & Tampere University", "fullName": "Samu Koskinen", "givenName": "Samu", "surname": "Koskinen", "__typename": "ArticleAuthorType" }, { "affiliation": "Huawei Finland & Tampere University", "fullName": "Joni-Kristian Kämäräinen", "givenName": 
"Joni-Kristian", "surname": "Kämäräinen", "__typename": "ArticleAuthorType" } ], "idPrefix": "icpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-08-01T00:00:00", "pubType": "proceedings", "pages": "281-287", "year": "2022", "issn": null, "isbn": "978-1-6654-9062-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09956316", "articleId": "1IHoFFBmjcc", "__typename": "AdjacentArticleType" }, "next": { "fno": "09956179", "articleId": "1IHoQE7NRu0", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2015/8391/0/8391d541", "title": "Complementary Sets of Shutter Sequences for Motion Deblurring", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391d541/12OmNApu5Jm", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2009/3992/0/05206685", "title": "Coded exposure deblurring: Optimized codes for PSF estimation and invertibility", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2009/05206685/12OmNs59JMc", "parentPublication": { "id": "proceedings/cvpr/2009/3992/0", "title": "2009 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2010/6984/0/05539935", "title": "Coded exposure imaging for projective motion deblurring", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2010/05539935/12OmNwbLVjY", "parentPublication": { "id": "proceedings/cvpr/2010/6984/0", "title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/wacv/2019/1975/0/197500c116", "title": "Single Image Deblurring and Camera Motion Estimation With Depth Map", "doi": null, "abstractUrl": "/proceedings-article/wacv/2019/197500c116/18j8IDePOfK", "parentPublication": { "id": "proceedings/wacv/2019/1975/0", "title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2022/5851/0/09887786", "title": "Learning Spatially Varying Pixel Exposures for Motion Deblurring", "doi": null, "abstractUrl": "/proceedings-article/iccp/2022/09887786/1GZiyDaecVO", "parentPublication": { "id": "proceedings/iccp/2022/5851/0", "title": "2022 IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600r7744", "title": "Unifying Motion Deblurring and Frame Interpolation with Events", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600r7744/1H1jp9gsCaY", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csci/2018/1360/0/136000a417", "title": "Densely Connected Convolutional Networks for Multi-Exposure Fusion", "doi": null, "abstractUrl": "/proceedings-article/csci/2018/136000a417/1gjRwnmVemY", "parentPublication": { "id": "proceedings/csci/2018/1360/0", "title": "2018 International Conference on Computational Science and Computational Intelligence (CSCI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mlbdbi/2020/9638/0/963800a521", "title": "Single Image Motion Deblurring Based On Modified DenseNet", "doi": null, "abstractUrl": "/proceedings-article/mlbdbi/2020/963800a521/1rxhweAU9LW", "parentPublication": { 
"id": "proceedings/mlbdbi/2020/9638/0", "title": "2020 2nd International Conference on Machine Learning, Big Data and Business Intelligence (MLBDBI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900l1931", "title": "Digital Gimbal: End-to-end Deep Image Stabilization with Learnable Exposure Times", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900l1931/1yeJTKHiG76", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900k0566", "title": "Auto-Exposure Fusion for Single-Image Shadow Removal", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900k0566/1yeKp9CxQYM", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1KxUhhFgzlK", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "acronym": "wacv", "groupId": "1000040", "volume": "0", "displayVolume": "0", "year": "2023", "__typename": "ProceedingType" }, "article": { "id": "1KxVt19EMyk", "doi": "10.1109/WACV56688.2023.00405", "title": "Single-Image HDR Reconstruction by Multi-Exposure Generation", "normalizedTitle": "Single-Image HDR Reconstruction by Multi-Exposure Generation", "abstract": "High dynamic range (HDR) imaging is an indispensable technique in modern photography. Traditional methods focus on HDR reconstruction from multiple images, solving the core problems of image alignment, fusion, and tone mapping, yet having a perfect solution due to ghosting and other visual artifacts in the reconstruction. Recent attempts at single-image HDR reconstruction show a promising alternative: by learning to map pixel values to their irradiance using a neural network, one can bypass the align-and-merge pipeline completely yet still obtain a high-quality HDR image. In this work, we propose a weakly supervised learning method that inverts the physical image formation process for HDR reconstruction via learning to generate multiple exposures from a single image. Our neural network can invert the camera response to reconstruct pixel irradiance before synthesizing multiple exposures and hallucinating details in under- and over-exposed regions from a single input image. To train the network, we propose a representation loss, a reconstruction loss, and a perceptual loss applied on pairs of under- and over-exposure images and thus do not require HDR images for training. Our experiments show that our proposed model can effectively reconstruct HDR images. Our qualitative and quantitative results show that our method achieves state-of-the-art performance on the DrTMO dataset. 
Our code is available at https://github.com/VinAIResearch/single_image_hdr.", "abstracts": [ { "abstractType": "Regular", "content": "High dynamic range (HDR) imaging is an indispensable technique in modern photography. Traditional methods focus on HDR reconstruction from multiple images, solving the core problems of image alignment, fusion, and tone mapping, yet having a perfect solution due to ghosting and other visual artifacts in the reconstruction. Recent attempts at single-image HDR reconstruction show a promising alternative: by learning to map pixel values to their irradiance using a neural network, one can bypass the align-and-merge pipeline completely yet still obtain a high-quality HDR image. In this work, we propose a weakly supervised learning method that inverts the physical image formation process for HDR reconstruction via learning to generate multiple exposures from a single image. Our neural network can invert the camera response to reconstruct pixel irradiance before synthesizing multiple exposures and hallucinating details in under- and over-exposed regions from a single input image. To train the network, we propose a representation loss, a reconstruction loss, and a perceptual loss applied on pairs of under- and over-exposure images and thus do not require HDR images for training. Our experiments show that our proposed model can effectively reconstruct HDR images. Our qualitative and quantitative results show that our method achieves state-of-the-art performance on the DrTMO dataset. Our code is available at https://github.com/VinAIResearch/single_image_hdr.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "High dynamic range (HDR) imaging is an indispensable technique in modern photography. Traditional methods focus on HDR reconstruction from multiple images, solving the core problems of image alignment, fusion, and tone mapping, yet having a perfect solution due to ghosting and other visual artifacts in the reconstruction. 
Recent attempts at single-image HDR reconstruction show a promising alternative: by learning to map pixel values to their irradiance using a neural network, one can bypass the align-and-merge pipeline completely yet still obtain a high-quality HDR image. In this work, we propose a weakly supervised learning method that inverts the physical image formation process for HDR reconstruction via learning to generate multiple exposures from a single image. Our neural network can invert the camera response to reconstruct pixel irradiance before synthesizing multiple exposures and hallucinating details in under- and over-exposed regions from a single input image. To train the network, we propose a representation loss, a reconstruction loss, and a perceptual loss applied on pairs of under- and over-exposure images and thus do not require HDR images for training. Our experiments show that our proposed model can effectively reconstruct HDR images. Our qualitative and quantitative results show that our method achieves state-of-the-art performance on the DrTMO dataset. 
"Our code is available at https://github.com/VinAIResearch/single_image_hdr.", "fno": "934600e052", "keywords": [ "Cameras", "Image Enhancement", "Image Reconstruction", "Image Resolution", "Image Restoration", "Supervised Learning", "Dr TMO Dataset", "HDR Images", "High Dynamic Range Imaging", "High Quality HDR Image", "Image Alignment", "Multiexposure Generation", "Multiple Exposures", "Neural Network", "Over Exposure Images", "Physical Image Formation Process", "Reconstruction Loss", "Single Input Image", "Single Image HDR Reconstruction", "Under Exposure Images", "Weakly Supervised Learning Method", "Training", "Photography", "Image Quality", "Visualization", "Computer Vision", "Neural Networks", "Supervised Learning", "Algorithms Computational Photography", "Image And Video Synthesis", "Low Level And Physics Based Vision" ], "authors": [ { "affiliation": "VinAI Research", "fullName": "Phuoc-Hieu Le", "givenName": "Phuoc-Hieu", "surname": "Le", "__typename": "ArticleAuthorType" }, { "affiliation": "VinAI Research", "fullName": "Quynh Le", "givenName": "Quynh", "surname": "Le", "__typename": "ArticleAuthorType" }, { "affiliation": "VinAI Research", "fullName": "Rang Nguyen", "givenName": "Rang", "surname": "Nguyen", "__typename": "ArticleAuthorType" }, { "affiliation": "VinAI Research", "fullName": "Binh-Son Hua", "givenName": "Binh-Son", "surname": "Hua", "__typename": "ArticleAuthorType" } ], "idPrefix": "wacv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2023-01-01T00:00:00", "pubType": "proceedings", "pages": "4052-4061", "year": "2023", "issn": null, "isbn": "978-1-6654-9346-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1KxVsXiEx20", "name": "pwacv202393460-010030738s1-mm_934600e052.zip", "size": "14 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pwacv202393460-010030738s1-mm_934600e052.zip", "__typename": "WebExtraType" } ], 
"adjacentArticles": { "previous": { "fno": "934600e043", "articleId": "1KxUJfVPJsI", "__typename": "AdjacentArticleType" }, "next": { "fno": "934600e062", "articleId": "1KxUqFw7ej6", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccvw/2015/9711/0/5720a041", "title": "HDR Recovery Under Rolling Shutter Distortions", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2015/5720a041/12OmNAjO6Em", "parentPublication": { "id": "proceedings/iccvw/2015/9711/0", "title": "2015 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2013/6463/0/06528315", "title": "Unified HDR reconstruction from raw CFA data", "doi": null, "abstractUrl": "/proceedings-article/iccp/2013/06528315/12OmNC1oT4W", "parentPublication": { "id": "proceedings/iccp/2013/6463/0", "title": "2013 IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2013/6463/0/06528309", "title": "Simultaneous HDR image reconstruction and denoising for dynamic scenes", "doi": null, "abstractUrl": "/proceedings-article/iccp/2013/06528309/12OmNyp9Mjb", "parentPublication": { "id": "proceedings/iccp/2013/6463/0", "title": "2013 IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2010/6984/0/05540208", "title": "Optimal HDR reconstruction with linear digital cameras", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2010/05540208/12OmNzahciI", "parentPublication": { "id": "proceedings/cvpr/2010/6984/0", "title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/iccv/2021/2812/0/281200c482", "title": "HDR Video Reconstruction: A Coarse-to-fine Network and A Real-world Benchmark Dataset", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200c482/1BmJZwgUVO0", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2022/5851/0/09887659", "title": "MantissaCam: Learning Snapshot High-dynamic-range Imaging with Perceptually-based In-pixel Irradiance Encoding", "doi": null, "abstractUrl": "/proceedings-article/iccp/2022/09887659/1GZiuL29mBW", "parentPublication": { "id": "proceedings/iccp/2022/5851/0", "title": "2022 IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600s8377", "title": "HDR-NeRF: High Dynamic Range Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600s8377/1H1kSeZPinK", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2021/0191/0/019100d981", "title": "How to cheat with metrics in single-image HDR reconstruction", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2021/019100d981/1yNi4RjCYI8", "parentPublication": { "id": "proceedings/iccvw/2021/0191/0", "title": "2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2021/4899/0/489900a354", "title": "HDRUNet: Single Image HDR Reconstruction with Denoising and Dequantization", "doi": null, "abstractUrl": 
"/proceedings-article/cvprw/2021/489900a354/1yVzZqCtsvm", "parentPublication": { "id": "proceedings/cvprw/2021/4899/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccst/2021/4254/0/425400a134", "title": "Single Image HDR Reconstruction Using Generative Adversarial Networks", "doi": null, "abstractUrl": "/proceedings-article/iccst/2021/425400a134/1ziPpIpc7a8", "parentPublication": { "id": "proceedings/iccst/2021/4254/0", "title": "2021 International Conference on Culture-oriented Science & Technology (ICCST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1m3n9N02qgE", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1m3o10tCNqg", "doi": "10.1109/CVPR42600.2020.00189", "title": "Learning a Reinforced Agent for Flexible Exposure Bracketing Selection", "normalizedTitle": "Learning a Reinforced Agent for Flexible Exposure Bracketing Selection", "abstract": "Automatically selecting exposure bracketing (images exposed differently) is important to obtain a high dynamic range image by using multi-exposure fusion. Unlike previous methods that have many restrictions such as requiring camera response function, sensor noise model, and a stream of preview images with different exposures (not accessible in some scenarios e.g. mobile applications), we propose a novel deep neural network to automatically select exposure bracketing, named EBSNet, which is sufficiently flexible without having the above restrictions. EBSNet is formulated as a reinforced agent that is trained by maximizing rewards provided by a multi-exposure fusion network (MEFNet). By utilizing the illumination and semantic information extracted from just a single auto-exposure preview image, EBSNet enables to select an optimal exposure bracketing for multi-exposure fusion. EBSNet and MEFNet can be jointly trained to produce favorable results against recent state-of-the-art approaches. To facilitate future research, we provide a new benchmark dataset for multi-exposure selection and fusion.", "abstracts": [ { "abstractType": "Regular", "content": "Automatically selecting exposure bracketing (images exposed differently) is important to obtain a high dynamic range image by using multi-exposure fusion. 
Unlike previous methods that have many restrictions such as requiring camera response function, sensor noise model, and a stream of preview images with different exposures (not accessible in some scenarios e.g. mobile applications), we propose a novel deep neural network to automatically select exposure bracketing, named EBSNet, which is sufficiently flexible without having the above restrictions. EBSNet is formulated as a reinforced agent that is trained by maximizing rewards provided by a multi-exposure fusion network (MEFNet). By utilizing the illumination and semantic information extracted from just a single auto-exposure preview image, EBSNet enables to select an optimal exposure bracketing for multi-exposure fusion. EBSNet and MEFNet can be jointly trained to produce favorable results against recent state-of-the-art approaches. To facilitate future research, we provide a new benchmark dataset for multi-exposure selection and fusion.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Automatically selecting exposure bracketing (images exposed differently) is important to obtain a high dynamic range image by using multi-exposure fusion. Unlike previous methods that have many restrictions such as requiring camera response function, sensor noise model, and a stream of preview images with different exposures (not accessible in some scenarios e.g. mobile applications), we propose a novel deep neural network to automatically select exposure bracketing, named EBSNet, which is sufficiently flexible without having the above restrictions. EBSNet is formulated as a reinforced agent that is trained by maximizing rewards provided by a multi-exposure fusion network (MEFNet). By utilizing the illumination and semantic information extracted from just a single auto-exposure preview image, EBSNet enables to select an optimal exposure bracketing for multi-exposure fusion. 
EBSNet and MEFNet can be jointly trained to produce favorable results against recent state-of-the-art approaches. To facilitate future research, we provide a new benchmark dataset for multi-exposure selection and fusion.", "fno": "716800b817", "keywords": [ "Cameras", "Image Fusion", "Image Resolution", "Image Sensors", "Learning Artificial Intelligence", "Neural Nets", "Single Auto Exposure Preview Image", "Optimal Exposure", "Reinforced Agent", "Flexible Exposure Bracketing Selection", "High Dynamic Range Image", "Camera Response Function", "Sensor Noise Model", "Preview Images", "Deep Neural Network", "Named EBS Net", "Multiexposure Fusion Network", "MEF Net", "Semantics", "Dynamic Range", "Lighting", "Cameras", "Feature Extraction", "Learning Artificial Intelligence", "Neural Networks" ], "authors": [ { "affiliation": "SenseTime Research; The University of Hong Kong", "fullName": "Zhouxia Wang", "givenName": "Zhouxia", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "SenseTime Research", "fullName": "Jiawei Zhang", "givenName": "Jiawei", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "SenseTime Research", "fullName": "Mude Lin", "givenName": "Mude", "surname": "Lin", "__typename": "ArticleAuthorType" }, { "affiliation": "SenseTime Research", "fullName": "Jiong Wang", "givenName": "Jiong", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "The University of Hong Kong", "fullName": "Ping Luo", "givenName": "Ping", "surname": "Luo", "__typename": "ArticleAuthorType" }, { "affiliation": "SenseTime Research", "fullName": "Jimmy Ren", "givenName": "Jimmy", "surname": "Ren", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-06-01T00:00:00", "pubType": "proceedings", "pages": "1817-1825", "year": "2020", "issn": null, "isbn": "978-1-7281-7168-5", "notes": null, "notesType": null, 
"__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "716800b807", "articleId": "1m3otOoU29a", "__typename": "AdjacentArticleType" }, "next": { "fno": "716800b826", "articleId": "1m3ne7AgfZu", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/crv/2014/4337/0/4337a071", "title": "Generalized Exposure Fusion Weights Estimation", "doi": null, "abstractUrl": "/proceedings-article/crv/2014/4337a071/12OmNBOlllb", "parentPublication": { "id": "proceedings/crv/2014/4337/0", "title": "2014 Canadian Conference on Computer and Robot Vision (CRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2017/6067/0/08019319", "title": "Exploiting patch-based correlation for ghost removal in exposure fusion", "doi": null, "abstractUrl": "/proceedings-article/icme/2017/08019319/12OmNwDj1gf", "parentPublication": { "id": "proceedings/icme/2017/6067/0", "title": "2017 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2017/6067/0/08019529", "title": "Multi-scale exposure fusion via gradient domain guided image filtering", "doi": null, "abstractUrl": "/proceedings-article/icme/2017/08019529/12OmNxw5Bam", "parentPublication": { "id": "proceedings/icme/2017/6067/0", "title": "2017 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/10/08437183", "title": "Personalized Exposure Control Using Adaptive Metering and Reinforcement Learning", "doi": null, "abstractUrl": "/journal/tg/2019/10/08437183/13rRUIM2VBP", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/wacv/2022/0915/0/091500a041", "title": "Single-Photon Camera Guided Extreme Dynamic Range Imaging", "doi": null, "abstractUrl": "/proceedings-article/wacv/2022/091500a041/1B13zsIYHrG", "parentPublication": { "id": "proceedings/wacv/2022/0915/0", "title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icftic/2022/2195/0/10075235", "title": "An efficient multi-exposure image fusion algorithm", "doi": null, "abstractUrl": "/proceedings-article/icftic/2022/10075235/1LRlkJmXwJO", "parentPublication": { "id": "proceedings/icftic/2022/2195/0", "title": "2022 4th International Conference on Frontiers Technology of Information and Computer (ICFTIC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2019/3263/0/08747329", "title": "A Fast, Scalable, and Reliable Deghosting Method for Extreme Exposure Fusion", "doi": null, "abstractUrl": "/proceedings-article/iccp/2019/08747329/1bcJwwzmhUI", "parentPublication": { "id": "proceedings/iccp/2019/3263/0", "title": "2019 IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csci/2018/1360/0/136000a417", "title": "Densely Connected Convolutional Networks for Multi-Exposure Fusion", "doi": null, "abstractUrl": "/proceedings-article/csci/2018/136000a417/1gjRwnmVemY", "parentPublication": { "id": "proceedings/csci/2018/1360/0", "title": "2018 International Conference on Computational Science and Computational Intelligence (CSCI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093643", "title": "An Extended Exposure Fusion and its Application to Single Image Contrast Enhancement", "doi": null, "abstractUrl": 
"/proceedings-article/wacv/2020/09093643/1jPbwJf3Ta0", "parentPublication": { "id": "proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isceic/2021/4160/0/416000a182", "title": "Multiple-Exposure Fusion with Halo-Free Convolutional Neural Network", "doi": null, "abstractUrl": "/proceedings-article/isceic/2021/416000a182/1yzP8eJxyc8", "parentPublication": { "id": "proceedings/isceic/2021/4160/0", "title": "2021 2nd International Symposium on Computer Engineering and Intelligent Communications (ISCEIC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1tpB6mNNDaM", "title": "2020 2nd International Conference on Information Technology and Computer Application (ITCA)", "acronym": "itca", "groupId": "1836624", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1tpBcxaCeB2", "doi": "10.1109/ITCA52113.2020.00019", "title": "Overview of Meta-Reinforcement Learning Research", "normalizedTitle": "Overview of Meta-Reinforcement Learning Research", "abstract": "Machine learning is a method to achieve artificial intelligence, which is divided into three categories: supervised learning, unsupervised earning, and reinforcement learning. The over-reliance of deep learning on big data restricts its development to some extent, so meta-reinforcement learning (meta-RL) research has received more and more attention. The paper introduces the background, research status and development trend of meta-reinforcement learning, which is also considered as one of the most likely ways to realize general artificial intelligence.", "abstracts": [ { "abstractType": "Regular", "content": "Machine learning is a method to achieve artificial intelligence, which is divided into three categories: supervised learning, unsupervised earning, and reinforcement learning. The over-reliance of deep learning on big data restricts its development to some extent, so meta-reinforcement learning (meta-RL) research has received more and more attention. The paper introduces the background, research status and development trend of meta-reinforcement learning, which is also considered as one of the most likely ways to realize general artificial intelligence.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Machine learning is a method to achieve artificial intelligence, which is divided into three categories: supervised learning, unsupervised earning, and reinforcement learning. 
The over-reliance of deep learning on big data restricts its development to some extent, so meta-reinforcement learning (meta-RL) research has received more and more attention. The paper introduces the background, research status and development trend of meta-reinforcement learning, which is also considered as one of the most likely ways to realize general artificial intelligence.", "fno": "037800a054", "keywords": [ "Big Data", "Supervised Learning", "Unsupervised Learning", "Meta Reinforcement Learning Research", "Machine Learning", "Supervised Learning", "Unsupervised Earning", "Deep Learning", "Meta RL", "Research Status", "General Artificial Intelligence", "Big Data", "Deep Learning", "Machine Learning Algorithms", "Supervised Learning", "Reinforcement Learning", "Learning Artificial Intelligence", "Market Research", "Artificial Intelligence", "Machine Learning", "Deep Learning", "Meta Reinforcement Learning" ], "authors": [ { "affiliation": "China University of Mining and Technology School of Control Science and Engineering,Xuzhou,China", "fullName": "Peng Shengguang", "givenName": "Peng", "surname": "Shengguang", "__typename": "ArticleAuthorType" } ], "idPrefix": "itca", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-12-01T00:00:00", "pubType": "proceedings", "pages": "54-57", "year": "2020", "issn": null, "isbn": "978-1-6654-0378-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "037800a050", "articleId": "1tpBkIm6Tcc", "__typename": "AdjacentArticleType" }, "next": { "fno": "037800a058", "articleId": "1tpB9hNlQli", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/snpd/2017/5504/0/08022767", "title": "Developing game AI agent behaving like human by mixing reinforcement learning and supervised learning", "doi": null, "abstractUrl": 
"/proceedings-article/snpd/2017/08022767/12OmNBqv2aH", "parentPublication": { "id": "proceedings/snpd/2017/5504/0", "title": "2017 18th IEEE/ACIS International Conference on Software Engineering, Artificial Intelligence, Networking and Parallel/Distributed Computing (SNPD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cse/2009/3823/4/3823e849", "title": "Reinforcement Learning of Listener Response for Mood Classification of Audio", "doi": null, "abstractUrl": "/proceedings-article/cse/2009/3823e849/12OmNrJAe0d", "parentPublication": { "id": "proceedings/cse/2009/3823/2", "title": "2009 International Conference on Computational Science and Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/insai/2021/0859/0/085900a218", "title": "Separating Explorer for Task Inference Based Meta Reinforcement Learning Algorithm", "doi": null, "abstractUrl": "/proceedings-article/insai/2021/085900a218/1CHwXR1DI1q", "parentPublication": { "id": "proceedings/insai/2021/0859/0", "title": "2021 International Conference on Networking Systems of AI (INSAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipccc/2022/8018/0/09894336", "title": "FedMC: Federated Reinforcement Learning on the Edge with Meta-Critic Networks", "doi": null, "abstractUrl": "/proceedings-article/ipccc/2022/09894336/1HpCxb2Q8qQ", "parentPublication": { "id": "proceedings/ipccc/2022/8018/0", "title": "2022 IEEE International Performance, Computing, and Communications Conference (IPCCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isaiee/2022/6357/0/635700a246", "title": "Research on Reinforcement Learning algorithms in Computer Vision", "doi": null, "abstractUrl": "/proceedings-article/isaiee/2022/635700a246/1LRkZbYMv5K", "parentPublication": { "id": "proceedings/isaiee/2022/6357/0", "title": 
"2022 International Symposium on Advances in Informatics, Electronics and Education (ISAIEE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iucc-cit-dsci-smartcns/2022/7726/0/772600a357", "title": "Research on virtual human swarm football collaboration technology based on reinforcement learning", "doi": null, "abstractUrl": "/proceedings-article/iucc-cit-dsci-smartcns/2022/772600a357/1M4rgTRlnCE", "parentPublication": { "id": "proceedings/iucc-cit-dsci-smartcns/2022/7726/0", "title": "2022 IEEE 21st International Conference on Ubiquitous Computing and Communications (IUCC/CIT/DSCI/SmartCNS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccea/2020/5904/0/09103854", "title": "Research on Agent Control Algorithm Based on Reinforcement Learning", "doi": null, "abstractUrl": "/proceedings-article/iccea/2020/09103854/1kesBMYPLfW", "parentPublication": { "id": "proceedings/iccea/2020/5904/0", "title": "2020 International Conference on Computer Engineering and Application (ICCEA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800m2120", "title": "Unsupervised Reinforcement Learning of Transferable Meta-Skills for Embodied Navigation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800m2120/1m3oote81ck", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sbgames/2020/8432/0/843200a178", "title": "URNAI: A Multi-Game Toolkit for Experimenting Deep Reinforcement Learning Algorithms", "doi": null, "abstractUrl": "/proceedings-article/sbgames/2020/843200a178/1pQIL61iCkg", "parentPublication": { "id": "proceedings/sbgames/2020/8432/0", "title": "2020 19th Brazilian Symposium on Computer 
Games and Digital Entertainment (SBGames)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpads/2020/9074/0/907400a266", "title": "Multi-user Edge-assisted Video Analytics Task Offloading Game based on Deep Reinforcement Learning", "doi": null, "abstractUrl": "/proceedings-article/icpads/2020/907400a266/1rvCuntK0co", "parentPublication": { "id": "proceedings/icpads/2020/9074/0", "title": "2020 IEEE 26th International Conference on Parallel and Distributed Systems (ICPADS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1yeHGyRsuys", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1yeHUUVQqha", "doi": "10.1109/CVPR46437.2021.00904", "title": "Learning Multi-Scale Photo Exposure Correction", "normalizedTitle": "Learning Multi-Scale Photo Exposure Correction", "abstract": "Capturing photographs with wrong exposures remains a major source of errors in camera-based imaging. Exposure problems are categorized as either: (i) overexposed, where the camera exposure was too long, resulting in bright and washed-out image regions, or (ii) underexposed, where the exposure was too short, resulting in dark regions. Both under- and overexposure greatly reduce the contrast and visual appeal of an image. Prior work mainly focuses on underexposed images or general image enhancement. In contrast, our proposed method targets both over- and underexposure errors in photographs. We formulate the exposure correction problem as two main sub-problems: (i) color enhancement and (ii) detail enhancement. Accordingly, we propose a coarse-to-fine deep neural network (DNN) model, trainable in an end-to-end manner, that addresses each sub-problem separately. A key aspect of our solution is a new dataset of over 24,000 images exhibiting the broadest range of exposure values to date with a corresponding properly exposed image. Our method achieves results on par with existing state-of-the-art methods on underexposed images and yields significant improvements for images suffering from overexposure errors.", "abstracts": [ { "abstractType": "Regular", "content": "Capturing photographs with wrong exposures remains a major source of errors in camera-based imaging. 
Exposure problems are categorized as either: (i) overexposed, where the camera exposure was too long, resulting in bright and washed-out image regions, or (ii) underexposed, where the exposure was too short, resulting in dark regions. Both under- and overexposure greatly reduce the contrast and visual appeal of an image. Prior work mainly focuses on underexposed images or general image enhancement. In contrast, our proposed method targets both over- and underexposure errors in photographs. We formulate the exposure correction problem as two main sub-problems: (i) color enhancement and (ii) detail enhancement. Accordingly, we propose a coarse-to-fine deep neural network (DNN) model, trainable in an end-to-end manner, that addresses each sub-problem separately. A key aspect of our solution is a new dataset of over 24,000 images exhibiting the broadest range of exposure values to date with a corresponding properly exposed image. Our method achieves results on par with existing state-of-the-art methods on underexposed images and yields significant improvements for images suffering from overexposure errors.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Capturing photographs with wrong exposures remains a major source of errors in camera-based imaging. Exposure problems are categorized as either: (i) overexposed, where the camera exposure was too long, resulting in bright and washed-out image regions, or (ii) underexposed, where the exposure was too short, resulting in dark regions. Both under- and overexposure greatly reduce the contrast and visual appeal of an image. Prior work mainly focuses on underexposed images or general image enhancement. In contrast, our proposed method targets both over- and underexposure errors in photographs. We formulate the exposure correction problem as two main sub-problems: (i) color enhancement and (ii) detail enhancement. 
Accordingly, we propose a coarse-to-fine deep neural network (DNN) model, trainable in an end-to-end manner, that addresses each sub-problem separately. A key aspect of our solution is a new dataset of over 24,000 images exhibiting the broadest range of exposure values to date with a corresponding properly exposed image. Our method achieves results on par with existing state-of-the-art methods on underexposed images and yields significant improvements for images suffering from overexposure errors.", "fno": "450900j153", "keywords": [ "Cameras", "Deep Learning Artificial Intelligence", "Image Colour Analysis", "Image Enhancement", "Camera Based Imaging", "Camera Exposure", "Image Regions", "Dark Regions", "Visual Appeal", "Underexposed Images", "General Image Enhancement", "Underexposure Errors", "Exposure Correction Problem", "Color Enhancement", "Detail Enhancement", "Coarse To Fine Deep Neural Network Model", "Image Exposure", "Overexposure Errors", "Multiscale Photo Exposure Correction Learning", "Photograph Underexposure Errors", "DNN", "Deep Learning", "Visualization", "Computer Vision", "Image Color Analysis", "Cameras", "Pattern Recognition", "Image Enhancement" ], "authors": [ { "affiliation": "Samsung AI Centre (SAIC),Toronto,Canada", "fullName": "Mahmoud Afifi", "givenName": "Mahmoud", "surname": "Afifi", "__typename": "ArticleAuthorType" }, { "affiliation": "Samsung AI Centre (SAIC),Toronto,Canada", "fullName": "Konstantinos G. Derpanis", "givenName": "Konstantinos G.", "surname": "Derpanis", "__typename": "ArticleAuthorType" }, { "affiliation": "Heidelberg University,Germany", "fullName": "Björn Ommer", "givenName": "Björn", "surname": "Ommer", "__typename": "ArticleAuthorType" }, { "affiliation": "Samsung AI Centre (SAIC),Toronto,Canada", "fullName": "Michael S. 
Brown", "givenName": "Michael S.", "surname": "Brown", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-06-01T00:00:00", "pubType": "proceedings", "pages": "9153-9163", "year": "2021", "issn": null, "isbn": "978-1-6654-4509-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1yeHUNhf6Ni", "name": "pcvpr202145090-09577866s1-mm_450900j153.zip", "size": "15.6 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202145090-09577866s1-mm_450900j153.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "450900j143", "articleId": "1yeMjoNZOCs", "__typename": "AdjacentArticleType" }, "next": { "fno": "450900j164", "articleId": "1yeIOOhHx6g", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icme/2015/7082/0/07177382", "title": "Temporally consistent region-based video exposure correction", "doi": null, "abstractUrl": "/proceedings-article/icme/2015/07177382/12OmNApLGCi", "parentPublication": { "id": "proceedings/icme/2015/7082/0", "title": "2015 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ic3/2016/3251/0/07880244", "title": "Hue preserving color image enhancement using guided filter based sub image histogram equalization", "doi": null, "abstractUrl": "/proceedings-article/ic3/2016/07880244/12OmNyugyM2", "parentPublication": { "id": "proceedings/ic3/2016/3251/0", "title": "2016 Ninth International Conference on Contemporary Computing (IC3)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/06/07167723", "title": "Underexposed Video Enhancement via Perception-Driven Progressive Fusion", "doi": null, "abstractUrl": 
"/journal/tg/2016/06/07167723/13rRUxd2aZ5", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2022/8739/0/873900a675", "title": "Exposure Correction Model to Enhance Image Quality", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2022/873900a675/1G56dNirXnq", "parentPublication": { "id": "proceedings/cvprw/2022/8739/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icnlp/2022/9544/0/954400a195", "title": "Attention Guided Network for Multi Exposure Image Fusion", "doi": null, "abstractUrl": "/proceedings-article/icnlp/2022/954400a195/1GNtgEFW0w0", "parentPublication": { "id": "proceedings/icnlp/2022/9544/0", "title": "2022 4th International Conference on Natural Language Processing (ICNLP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600g033", "title": "Exposure Normalization and Compensation for Multiple-Exposure Correction", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600g033/1H1nk2Xf3Hi", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956188", "title": "Active Short-Long Exposure Deblurring", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956188/1IHouTvRibC", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/wacv/2023/9346/0/934600b715", "title": "Robust Real-world Image Enhancement Based on Multi-Exposure LDR Images", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600b715/1KxV4ZRLFvy", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wcmeim/2019/5045/0/504500a516", "title": "Abnormal Exposure Image Self-Adaptive Correction Algorithm", "doi": null, "abstractUrl": "/proceedings-article/wcmeim/2019/504500a516/1hHLoyerIJy", "parentPublication": { "id": "proceedings/wcmeim/2019/5045/0", "title": "2019 2nd World Conference on Mechanical Engineering and Intelligent Manufacturing (WCMEIM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093643", "title": "An Extended Exposure Fusion and its Application to Single Image Contrast Enhancement", "doi": null, "abstractUrl": "/proceedings-article/wacv/2020/09093643/1jPbwJf3Ta0", "parentPublication": { "id": "proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1yeHGyRsuys", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1yeKp9CxQYM", "doi": "10.1109/CVPR46437.2021.01043", "title": "Auto-Exposure Fusion for Single-Image Shadow Removal", "normalizedTitle": "Auto-Exposure Fusion for Single-Image Shadow Removal", "abstract": "Shadow removal is still a challenging task due to its inherent background-dependent<sup>1</sup> and spatial-variant properties, leading to unknown and diverse shadow patterns. Even powerful deep neural networks could hardly recover traceless shadow-removed background. This paper proposes a new solution for this task by formulating it as an exposure fusion problem to address the challenges. Intuitively, we first estimate multiple over-exposure images w.r.t. the input image to let the shadow regions in these images have the same color with shadow-free areas in the input image. Then, we fuse the original input with the over-exposure images to generate the final shadow-free counterpart. Nevertheless, the spatial-variant property of the shadow requires the fusion to be sufficiently &#x2018;smart&#x2019;, that is, it should automatically select proper over-exposure pixels from different images to make the final output natural. To address this challenge, we propose the shadow-aware FusionNet that takes the shadow image as input to generate fusion weight maps across all the over-exposure images. Moreover, we propose the boundary-aware RefineNet to eliminate the remaining shadow trace further. We conduct extensive experiments on the ISTD, ISTD+, and SRD datasets to validate our method&#x2019;s effectiveness and show better performance in shadow regions and comparable performance in non-shadow regions over the state-of-the-art methods. 
We release the code in https://github.com/tsingqguo/exposure-fusion-shadow-removal.", "abstracts": [ { "abstractType": "Regular", "content": "Shadow removal is still a challenging task due to its inherent background-dependent<sup>1</sup> and spatial-variant properties, leading to unknown and diverse shadow patterns. Even powerful deep neural networks could hardly recover traceless shadow-removed background. This paper proposes a new solution for this task by formulating it as an exposure fusion problem to address the challenges. Intuitively, we first estimate multiple over-exposure images w.r.t. the input image to let the shadow regions in these images have the same color with shadow-free areas in the input image. Then, we fuse the original input with the over-exposure images to generate the final shadow-free counterpart. Nevertheless, the spatial-variant property of the shadow requires the fusion to be sufficiently &#x2018;smart&#x2019;, that is, it should automatically select proper over-exposure pixels from different images to make the final output natural. To address this challenge, we propose the shadow-aware FusionNet that takes the shadow image as input to generate fusion weight maps across all the over-exposure images. Moreover, we propose the boundary-aware RefineNet to eliminate the remaining shadow trace further. We conduct extensive experiments on the ISTD, ISTD+, and SRD datasets to validate our method&#x2019;s effectiveness and show better performance in shadow regions and comparable performance in non-shadow regions over the state-of-the-art methods. We release the code in https://github.com/tsingqguo/exposure-fusion-shadow-removal.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Shadow removal is still a challenging task due to its inherent background-dependent1 and spatial-variant properties, leading to unknown and diverse shadow patterns. Even powerful deep neural networks could hardly recover traceless shadow-removed background. 
This paper proposes a new solution for this task by formulating it as an exposure fusion problem to address the challenges. Intuitively, we first estimate multiple over-exposure images w.r.t. the input image to let the shadow regions in these images have the same color with shadow-free areas in the input image. Then, we fuse the original input with the over-exposure images to generate the final shadow-free counterpart. Nevertheless, the spatial-variant property of the shadow requires the fusion to be sufficiently ‘smart’, that is, it should automatically select proper over-exposure pixels from different images to make the final output natural. To address this challenge, we propose the shadow-aware FusionNet that takes the shadow image as input to generate fusion weight maps across all the over-exposure images. Moreover, we propose the boundary-aware RefineNet to eliminate the remaining shadow trace further. We conduct extensive experiments on the ISTD, ISTD+, and SRD datasets to validate our method’s effectiveness and show better performance in shadow regions and comparable performance in non-shadow regions over the state-of-the-art methods. 
We release the code in https://github.com/tsingqguo/exposure-fusion-shadow-removal.", "fno": "450900k0566", "keywords": [ "Deep Learning Artificial Intelligence", "Image Colour Analysis", "Image Enhancement", "Image Fusion", "Auto Exposure Fusion", "Single Image Shadow Removal", "Spatial Variant Property", "Diverse Shadow Patterns", "Deep Neural Networks", "Traceless Shadow Removed Background", "Exposure Fusion Problem", "Over Exposure Images", "Shadow Free Areas", "Over Exposure Pixels", "Shadow Aware Fusion Net", "Shadow Image", "Fusion Weight Maps", "Nonshadow Regions", "ISTD", "SRD Datasets", "Boundary Aware Refine Net", "Degradation", "Deep Learning", "Computer Vision", "Codes", "Image Color Analysis", "Fuses", "Lighting" ], "authors": [ { "affiliation": "University of South Carolina,USA", "fullName": "Lan Fu", "givenName": "Lan", "surname": "Fu", "__typename": "ArticleAuthorType" }, { "affiliation": "Nanyang Technological University,Singapore", "fullName": "Changqing Zhou", "givenName": "Changqing", "surname": "Zhou", "__typename": "ArticleAuthorType" }, { "affiliation": "Nanyang Technological University,Singapore", "fullName": "Qing Guo", "givenName": "Qing", "surname": "Guo", "__typename": "ArticleAuthorType" }, { "affiliation": "Alibaba Group,USA", "fullName": "Felix Juefei-Xu", "givenName": "Felix", "surname": "Juefei-Xu", "__typename": "ArticleAuthorType" }, { "affiliation": "Cleveland State University,USA", "fullName": "Hongkai Yu", "givenName": "Hongkai", "surname": "Yu", "__typename": "ArticleAuthorType" }, { "affiliation": "Tianjin University,China", "fullName": "Wei Feng", "givenName": "Wei", "surname": "Feng", "__typename": "ArticleAuthorType" }, { "affiliation": "Nanyang Technological University,Singapore", "fullName": "Yang Liu", "givenName": "Yang", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": "University of South Carolina,USA", "fullName": "Song Wang", "givenName": "Song", "surname": "Wang", "__typename": 
"ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-06-01T00:00:00", "pubType": "proceedings", "pages": "10566-10575", "year": "2021", "issn": null, "isbn": "978-1-6654-4509-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "450900k0556", "articleId": "1yeKx3Rv5ba", "__typename": "AdjacentArticleType" }, "next": { "fno": "450900k0576", "articleId": "1yeIxpP3C0M", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icme/2017/6067/0/08019319", "title": "Exploiting patch-based correlation for ghost removal in exposure fusion", "doi": null, "abstractUrl": "/proceedings-article/icme/2017/08019319/12OmNwDj1gf", "parentPublication": { "id": "proceedings/icme/2017/6067/0", "title": "2017 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032e724", "title": "DeepFuse: A Deep Unsupervised Approach for Exposure Fusion with Extreme Exposure Image Pairs", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032e724/12OmNxQOjBX", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2017/6067/0/08019529", "title": "Multi-scale exposure fusion via gradient domain guided image filtering", "doi": null, "abstractUrl": "/proceedings-article/icme/2017/08019529/12OmNxw5Bam", "parentPublication": { "id": "proceedings/icme/2017/6067/0", "title": "2017 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/cvpr/2022/6946/0/694600f617", "title": "Bijective Mapping Network for Shadow Removal", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f617/1H1jODjaaEE", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600b705", "title": "Fine-Context Shadow Detection using Shadow Removal", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600b705/1L8qiohbimI", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csci/2018/1360/0/136000a417", "title": "Densely Connected Convolutional Networks for Multi-Exposure Fusion", "doi": null, "abstractUrl": "/proceedings-article/csci/2018/136000a417/1gjRwnmVemY", "parentPublication": { "id": "proceedings/csci/2018/1360/0", "title": "2018 International Conference on Computational Science and Computational Intelligence (CSCI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300i577", "title": "Shadow Removal via Shadow Image Decomposition", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300i577/1hVlckpFsu4", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2021/4899/0/489900a826", "title": "Shadow Removal with Paired and Unpaired Learning", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2021/489900a826/1yJYlCN2G3u", "parentPublication": { "id": "proceedings/cvprw/2021/4899/0", "title": "2021 IEEE/CVF 
Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900e925", "title": "From Shadow Generation to Shadow Removal", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900e925/1yeKbk8l5Ze", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/12/09601181", "title": "Physics-Based Shadow Image Decomposition for Shadow Removal", "doi": null, "abstractUrl": "/journal/tp/2022/12/09601181/1yfWxXlOrVC", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBaT60w", "title": "2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)", "acronym": "fg", "groupId": "1000065", "volume": "1", "displayVolume": "1", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNyKJilp", "doi": "10.1109/FG.2015.7163149", "title": "Correcting radial and perspective distortion by using face shape information", "normalizedTitle": "Correcting radial and perspective distortion by using face shape information", "abstract": "In this paper, we propose a new technique for compensating radial and perspective distortions of photos acquired with wide-angle lens by using facial features detected from the images without using predefined calibration patterns. The proposed algorithm utilizes a statistical facial feature model to recover radial distortion and the facial features are further used for adaptive cylindrical projection which will reduce perspective distortion near the image boundary. Our algorithm has several advantages over the traditional methods. First, traditional calibration patterns, like man-made straight buildings, chessboards, or calibration cubes, are not required in our method. Even though the radial distortion can be corrected by several conventional methods, most of them usually produce photos with larger perspective distortion for faces compared to our method. The system is composed of four components: offline training of the statistical facial feature model, feature point extraction from distorted faces, estimation of radial distortion parameters and compensation of radial distortion, and adaptive cylindrical projection. In order to estimate the distortion parameters, we propose an energy considering the fitness between the undistorted coordinates of the facial feature points extracted from the input distorted image and the learned statistical facial feature model. 
Given the distortion parameters, the fitness is calculated by solving a linear least squares system. The distortion parameters that minimize the cost function are searched in a hierarchical manner. Experimental results demonstrate the distortion reduction in the corrected images by using the proposed method.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we propose a new technique for compensating radial and perspective distortions of photos acquired with wide-angle lens by using facial features detected from the images without using predefined calibration patterns. The proposed algorithm utilizes a statistical facial feature model to recover radial distortion and the facial features are further used for adaptive cylindrical projection which will reduce perspective distortion near the image boundary. Our algorithm has several advantages over the traditional methods. First, traditional calibration patterns, like man-made straight buildings, chessboards, or calibration cubes, are not required in our method. Even though the radial distortion can be corrected by several conventional methods, most of them usually produce photos with larger perspective distortion for faces compared to our method. The system is composed of four components: offline training of the statistical facial feature model, feature point extraction from distorted faces, estimation of radial distortion parameters and compensation of radial distortion, and adaptive cylindrical projection. In order to estimate the distortion parameters, we propose an energy considering the fitness between the undistorted coordinates of the facial feature points extracted from the input distorted image and the learned statistical facial feature model. Given the distortion parameters, the fitness is calculated by solving a linear least squares system. The distortion parameters that minimize the cost function are searched in a hierarchical manner. 
Experimental results demonstrate the distortion reduction in the corrected images by using the proposed method.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we propose a new technique for compensating radial and perspective distortions of photos acquired with wide-angle lens by using facial features detected from the images without using predefined calibration patterns. The proposed algorithm utilizes a statistical facial feature model to recover radial distortion and the facial features are further used for adaptive cylindrical projection which will reduce perspective distortion near the image boundary. Our algorithm has several advantages over the traditional methods. First, traditional calibration patterns, like man-made straight buildings, chessboards, or calibration cubes, are not required in our method. Even though the radial distortion can be corrected by several conventional methods, most of them usually produce photos with larger perspective distortion for faces compared to our method. The system is composed of four components: offline training of the statistical facial feature model, feature point extraction from distorted faces, estimation of radial distortion parameters and compensation of radial distortion, and adaptive cylindrical projection. In order to estimate the distortion parameters, we propose an energy considering the fitness between the undistorted coordinates of the facial feature points extracted from the input distorted image and the learned statistical facial feature model. Given the distortion parameters, the fitness is calculated by solving a linear least squares system. The distortion parameters that minimize the cost function are searched in a hierarchical manner. 
Experimental results demonstrate the distortion reduction in the corrected images by using the proposed method.", "fno": "07163149", "keywords": [ "Face", "Facial Features", "Shape", "Feature Extraction", "Nonlinear Distortion", "Calibration" ], "authors": [ { "affiliation": "Dept. of Comput. Sci., Nat. Tsing Hua Univ., Hsinchu, Taiwan", "fullName": "Tung-Ying Lee", "givenName": null, "surname": "Tung-Ying Lee", "__typename": "ArticleAuthorType" }, { "affiliation": "Inst. of Inf. Syst. & Applic., Nat. Tsing Hua Univ., Hsinchu, Taiwan", "fullName": "Tzu-Shan Chang", "givenName": null, "surname": "Tzu-Shan Chang", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Comput. Sci., Nat. Tsing Hua Univ., Hsinchu, Taiwan", "fullName": "Shang-Hong Lai", "givenName": null, "surname": "Shang-Hong Lai", "__typename": "ArticleAuthorType" } ], "idPrefix": "fg", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-05-01T00:00:00", "pubType": "proceedings", "pages": "1-8", "year": "2015", "issn": null, "isbn": "978-1-4799-6026-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07163148", "articleId": "12OmNC1Y5je", "__typename": "AdjacentArticleType" }, "next": { "fno": "07163150", "articleId": "12OmNwCsdxN", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2014/5118/0/5118a025", "title": "Critical Configurations for Radial Distortion Self-Calibration", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2014/5118a025/12OmNBpVQ9z", "parentPublication": { "id": "proceedings/cvpr/2014/5118/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isme/2010/7669/2/05573869", "title": "A New Camera Calibration Method Based on Two 
Stages Distortion Model", "doi": null, "abstractUrl": "/proceedings-article/isme/2010/05573869/12OmNwBT1lH", "parentPublication": { "id": "proceedings/isme/2010/7669/2", "title": "2010 International Conference of Information Science and Management Engineering. ISME 2010", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761021", "title": "Visual metrology with uncalibrated radial distorted images", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761021/12OmNwJgAKP", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/5209a184", "title": "Generalized Radial Alignment Constraint for Camera Calibration", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209a184/12OmNwnYG1M", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2013/4795/0/06549353", "title": "A robust camera-based method for optical distortion calibration of head-mounted displays", "doi": null, "abstractUrl": "/proceedings-article/vr/2013/06549353/12OmNwvVrHy", "parentPublication": { "id": "proceedings/vr/2013/4795/0", "title": "2013 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2000/0750/1/07501423", "title": "Violating Rotating Camera Geometry: The Effect of Radial Distortion on Self-Calibration", "doi": null, "abstractUrl": "/proceedings-article/icpr/2000/07501423/12OmNyQYtkT", "parentPublication": { "id": "proceedings/icpr/2000/0750/1", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/1992/2855/0/00223181", "title": "Accuracy assessment on camera calibration method not considering lens distortion", "doi": null, "abstractUrl": "/proceedings-article/cvpr/1992/00223181/12OmNzsrwf5", "parentPublication": { "id": "proceedings/cvpr/1992/2855/0", "title": "Proceedings 1992 IEEE Computer Society Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2007/08/i1309", "title": "Parameter-Free Radial Distortion Correction with Center of Distortion Estimation", "doi": null, "abstractUrl": "/journal/tp/2007/08/i1309/13rRUxASuqt", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300b062", "title": "Revisiting Radial Distortion Absolute Pose", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300b062/1hVlRpT15wA", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800h718", "title": "RDCFace: Radial Distortion Correction for Face Recognition", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800h718/1m3n9WusSUo", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNz4BdhN", "title": "2015 IEEE 56th Annual Symposium on Foundations of Computer Science (FOCS)", "acronym": "focs", "groupId": "1000292", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNzUPpvn", "doi": "10.1109/FOCS.2015.50", "title": "Reality Distortion: Exact and Approximate Algorithms for Embedding into the Line", "normalizedTitle": "Reality Distortion: Exact and Approximate Algorithms for Embedding into the Line", "abstract": "We describe algorithms for the problem of minimum distortion embeddings of finite metric spaces into the real line (or a finite subset of the line). The time complexities of our algorithms are parametrized by the values of the minimum distortion, δ, and the spread, Δ, of the point set we are embedding. We consider the problem of finding the minimum distortion bijection between two finite subsets of the Euclidean line. This problem was known to have an exact polynomial time solution when δ is below a specific small constant, and hard to approximate within a factor of δ1 -- ε, when δ is polynomially large. Let D be the largest adjacent pair distance, a value potentially much smaller than Δ. Then we provide a δO(δ2 log2 D)} n{O(1)} time exact algorithm for this problem, which in particular yields a quasipolynomial running time for constant δ, and polynomial D. For the more general problem of embedding any finite metric space (X, dX) into a finite subset of the line, Y, we provide a δO(δ2) (mn)O(1) time O(1)-approximation algorithm (where |X|=n and |Y|=m), which runs in polynomial time provided δ is a constant and δ is polynomial. 
This in turn allows us to get a δO(δ2) (n)O(1)} time O(1)-approximation algorithm for embedding (X, dX) into the continuous real line.", "abstracts": [ { "abstractType": "Regular", "content": "We describe algorithms for the problem of minimum distortion embeddings of finite metric spaces into the real line (or a finite subset of the line). The time complexities of our algorithms are parametrized by the values of the minimum distortion, δ, and the spread, Δ, of the point set we are embedding. We consider the problem of finding the minimum distortion bijection between two finite subsets of the Euclidean line. This problem was known to have an exact polynomial time solution when δ is below a specific small constant, and hard to approximate within a factor of δ1 -- ε, when δ is polynomially large. Let D be the largest adjacent pair distance, a value potentially much smaller than Δ. Then we provide a δO(δ2 log2 D)} n{O(1)} time exact algorithm for this problem, which in particular yields a quasipolynomial running time for constant δ, and polynomial D. For the more general problem of embedding any finite metric space (X, dX) into a finite subset of the line, Y, we provide a δO(δ2) (mn)O(1) time O(1)-approximation algorithm (where |X|=n and |Y|=m), which runs in polynomial time provided δ is a constant and δ is polynomial. This in turn allows us to get a δO(δ2) (n)O(1)} time O(1)-approximation algorithm for embedding (X, dX) into the continuous real line.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We describe algorithms for the problem of minimum distortion embeddings of finite metric spaces into the real line (or a finite subset of the line). The time complexities of our algorithms are parametrized by the values of the minimum distortion, δ, and the spread, Δ, of the point set we are embedding. We consider the problem of finding the minimum distortion bijection between two finite subsets of the Euclidean line. 
This problem was known to have an exact polynomial time solution when δ is below a specific small constant, and hard to approximate within a factor of δ1 -- ε, when δ is polynomially large. Let D be the largest adjacent pair distance, a value potentially much smaller than Δ. Then we provide a δO(δ2 log2 D)} n{O(1)} time exact algorithm for this problem, which in particular yields a quasipolynomial running time for constant δ, and polynomial D. For the more general problem of embedding any finite metric space (X, dX) into a finite subset of the line, Y, we provide a δO(δ2) (mn)O(1) time O(1)-approximation algorithm (where |X|=n and |Y|=m), which runs in polynomial time provided δ is a constant and δ is polynomial. This in turn allows us to get a δO(δ2) (n)O(1)} time O(1)-approximation algorithm for embedding (X, dX) into the continuous real line.", "fno": "8191a729", "keywords": [ "Distortion", "Approximation Algorithms", "Polynomials", "Approximation Methods", "Extraterrestrial Measurements", "Yttrium", "Approximation Algorithms", "Metric Embedding", "Distortion", "Fixed Parameter Tractable" ], "authors": [ { "affiliation": null, "fullName": "Amir Nayyeri", "givenName": "Amir", "surname": "Nayyeri", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Benjamin Raichel", "givenName": "Benjamin", "surname": "Raichel", "__typename": "ArticleAuthorType" } ], "idPrefix": "focs", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-10-01T00:00:00", "pubType": "proceedings", "pages": "729-747", "year": "2015", "issn": "0272-5428", "isbn": "978-1-4673-8191-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "8191a709", "articleId": "12OmNBt3qo7", "__typename": "AdjacentArticleType" }, "next": { "fno": "8191a748", "articleId": "12OmNzsrwbW", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, 
"recommendedArticles": [ { "id": "proceedings/focs/1996/7594/0/75940627", "title": "Clique is hard to approximate within n1-ε", "doi": null, "abstractUrl": "/proceedings-article/focs/1996/75940627/12OmNC8dgcH", "parentPublication": { "id": "proceedings/focs/1996/7594/0", "title": "Proceedings of 37th Conference on Foundations of Computer Science", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cso/2010/6812/1/05533149", "title": "An Exact Fast Algorithm for Minimum Hitting Set", "doi": null, "abstractUrl": "/proceedings-article/cso/2010/05533149/12OmNqH9hrS", "parentPublication": { "id": "proceedings/cso/2010/6812/1", "title": "2010 Third International Joint Conference on Computational Science and Optimization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/focs/2017/3464/0/3464a001", "title": "A Nearly Optimal Lower Bound on the Approximate Degree of AC^0", "doi": null, "abstractUrl": "/proceedings-article/focs/2017/3464a001/12OmNwKoZjn", "parentPublication": { "id": "proceedings/focs/2017/3464/0", "title": "2017 IEEE 58th Annual Symposium on Foundations of Computer Science (FOCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/focs/2016/3933/0/3933a258", "title": "Settling the Complexity of Computing Approximate Two-Player Nash Equilibria", "doi": null, "abstractUrl": "/proceedings-article/focs/2016/3933a258/12OmNx7G66y", "parentPublication": { "id": "proceedings/focs/2016/3933/0", "title": "2016 IEEE 57th Annual Symposium on Foundations of Computer Science (FOCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/focs/2014/6517/0/6517a316", "title": "Novel Polynomial Basis and Its Application to Reed-Solomon Erasure Codes", "doi": null, "abstractUrl": "/proceedings-article/focs/2014/6517a316/12OmNxVDuRF", "parentPublication": { "id": 
"proceedings/focs/2014/6517/0", "title": "2014 IEEE 55th Annual Symposium on Foundations of Computer Science (FOCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/focs/2015/8191/0/8191b143", "title": "Approximate Modularity", "doi": null, "abstractUrl": "/proceedings-article/focs/2015/8191b143/12OmNxdVgS6", "parentPublication": { "id": "proceedings/focs/2015/8191/0", "title": "2015 IEEE 56th Annual Symposium on Foundations of Computer Science (FOCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/focs/2016/3933/0/3933a751", "title": "Robust Fourier and Polynomial Curve Fitting", "doi": null, "abstractUrl": "/proceedings-article/focs/2016/3933a751/12OmNzayNu3", "parentPublication": { "id": "proceedings/focs/2016/3933/0", "title": "2016 IEEE 57th Annual Symposium on Foundations of Computer Science (FOCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/istcs/1993/3630/0/00253488", "title": "Deterministic approximate counting of depth-2 circuits", "doi": null, "abstractUrl": "/proceedings-article/istcs/1993/00253488/12OmNzdoN6e", "parentPublication": { "id": "proceedings/istcs/1993/3630/0", "title": "The 2nd Israel Symposium on Theory and Computing Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/case/2006/0310/0/04120347", "title": "Using the forward search and the polynomial approximation algorithms in the exact algorithm for manipulator's control in an unknown environment", "doi": null, "abstractUrl": "/proceedings-article/case/2006/04120347/12OmNzllxZz", "parentPublication": { "id": "proceedings/case/2006/0310/0", "title": "2006 IEEE International Conference on Automation Science and Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/focs/2018/4230/0/423000a544", "title": "Perfect Lp 
Sampling in a Data Stream", "doi": null, "abstractUrl": "/proceedings-article/focs/2018/423000a544/17D45Wc1IIk", "parentPublication": { "id": "proceedings/focs/2018/4230/0", "title": "2018 IEEE 59th Annual Symposium on Foundations of Computer Science (FOCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1gyr6w5YIIU", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1gysbC2QjG8", "doi": "10.1109/CVPR.2019.01209", "title": "Deep Single Image Camera Calibration With Radial Distortion", "normalizedTitle": "Deep Single Image Camera Calibration With Radial Distortion", "abstract": "Single image calibration is the problem of predicting the camera parameters from one image. This problem is of importance when dealing with images collected in uncontrolled conditions by non-calibrated cameras, such as crowd-sourced applications. In this work we propose a method to predict extrinsic (tilt and roll) and intrinsic (focal length and radial distortion) parameters from a single image. We propose a parameterization for radial distortion that is better suited for learning than directly predicting the distortion parameters. Moreover, predicting additional heterogeneous variables exacerbates the problem of loss balancing. We propose a new loss function based on point projections to avoid having to balance heterogeneous loss terms. Our method is, to our knowledge, the first to jointly estimate the tilt, roll, focal length, and radial distortion parameters from a single image. We thoroughly analyze the performance of the proposed method and the impact of the improvements and compare with previous approaches for single image radial distortion correction.", "abstracts": [ { "abstractType": "Regular", "content": "Single image calibration is the problem of predicting the camera parameters from one image. This problem is of importance when dealing with images collected in uncontrolled conditions by non-calibrated cameras, such as crowd-sourced applications. 
In this work we propose a method to predict extrinsic (tilt and roll) and intrinsic (focal length and radial distortion) parameters from a single image. We propose a parameterization for radial distortion that is better suited for learning than directly predicting the distortion parameters. Moreover, predicting additional heterogeneous variables exacerbates the problem of loss balancing. We propose a new loss function based on point projections to avoid having to balance heterogeneous loss terms. Our method is, to our knowledge, the first to jointly estimate the tilt, roll, focal length, and radial distortion parameters from a single image. We thoroughly analyze the performance of the proposed method and the impact of the improvements and compare with previous approaches for single image radial distortion correction.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Single image calibration is the problem of predicting the camera parameters from one image. This problem is of importance when dealing with images collected in uncontrolled conditions by non-calibrated cameras, such as crowd-sourced applications. In this work we propose a method to predict extrinsic (tilt and roll) and intrinsic (focal length and radial distortion) parameters from a single image. We propose a parameterization for radial distortion that is better suited for learning than directly predicting the distortion parameters. Moreover, predicting additional heterogeneous variables exacerbates the problem of loss balancing. We propose a new loss function based on point projections to avoid having to balance heterogeneous loss terms. Our method is, to our knowledge, the first to jointly estimate the tilt, roll, focal length, and radial distortion parameters from a single image. 
We thoroughly analyze the performance of the proposed method and the impact of the improvements and compare with previous approaches for single image radial distortion correction.", "fno": "329300l1809", "keywords": [ "Calibration", "Cameras", "Deep Single Image Camera Calibration", "Noncalibrated Cameras", "Crowd Sourced Applications", "Loss Balancing", "Radial Distortion Parameters", "Single Image Radial Distortion Correction", "Heterogeneous Loss Variable Terms", "Camera Parameter Prediction", "Learning Systems", "Computer Vision", "Structure From Motion", "Computational Modeling", "Distortion", "Cameras", "Robustness", "3 D From Single Image", "Deep Learning", "Vision Graphics", "Vision Applications And Systems" ], "authors": [ { "affiliation": "Mapillary", "fullName": "Manuel López", "givenName": "Manuel", "surname": "López", "__typename": "ArticleAuthorType" }, { "affiliation": "CMLA, ENS Cachan", "fullName": "Roger Marí", "givenName": "Roger", "surname": "Marí", "__typename": "ArticleAuthorType" }, { "affiliation": "Mapillary", "fullName": "Pau Gargallo", "givenName": "Pau", "surname": "Gargallo", "__typename": "ArticleAuthorType" }, { "affiliation": "Mapillary AB", "fullName": "Yubin Kuang", "givenName": "Yubin", "surname": "Kuang", "__typename": "ArticleAuthorType" }, { "affiliation": "Univ. 
of Malaga", "fullName": "Javier Gonzalez-Jimenez", "givenName": "Javier", "surname": "Gonzalez-Jimenez", "__typename": "ArticleAuthorType" }, { "affiliation": "Universitat Pompeu Fabra", "fullName": "Gloria Haro", "givenName": "Gloria", "surname": "Haro", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-06-01T00:00:00", "pubType": "proceedings", "pages": "11809-11817", "year": "2019", "issn": null, "isbn": "978-1-7281-3293-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "329300l1799", "articleId": "1gyrBOgxyJq", "__typename": "AdjacentArticleType" }, "next": { "fno": "329300l1818", "articleId": "1gyryRsRNbq", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2014/5118/0/5118a025", "title": "Critical Configurations for Radial Distortion Self-Calibration", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2014/5118a025/12OmNBpVQ9z", "parentPublication": { "id": "proceedings/cvpr/2014/5118/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457g691", "title": "Unsupervised Vanishing Point Detection and Camera Calibration from a Single Manhattan Image with Radial Distortion", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457g691/12OmNCwCLlq", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2013/4989/0/4989b368", "title": "Radial Distortion Self-Calibration", "doi": null, "abstractUrl": 
"/proceedings-article/cvpr/2013/4989b368/12OmNvqmUGe", "parentPublication": { "id": "proceedings/cvpr/2013/4989/0", "title": "2013 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/5209a184", "title": "Generalized Radial Alignment Constraint for Camera Calibration", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209a184/12OmNwnYG1M", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2000/0750/1/07501423", "title": "Violating Rotating Camera Geometry: The Effect of Radial Distortion on Self-Calibration", "doi": null, "abstractUrl": "/proceedings-article/icpr/2000/07501423/12OmNyQYtkT", "parentPublication": { "id": "proceedings/icpr/2000/0750/1", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391c345", "title": "On the Equivalence of Moving Entrance Pupil and Radial Distortion for Camera Calibration", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391c345/12OmNyshmIc", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dasc-picom-datacom-cyberscitech/2017/1956/0/08328476", "title": "Robust Radial Distortion Correction from a Single Image", "doi": null, "abstractUrl": "/proceedings-article/dasc-picom-datacom-cyberscitech/2017/08328476/17D45VTRoD1", "parentPublication": { "id": "proceedings/dasc-picom-datacom-cyberscitech/2017/1956/0", "title": "2017 IEEE 15th Intl Conf on Dependable, Autonomic and Secure 
Computing, 15th Intl Conf on Pervasive Intelligence and Computing, 3rd Intl Conf on Big Data Intelligence and Computing and Cyber Science and Technology Congress(DASC/PiCom/DataCom/CyberSciTech)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200f886", "title": "Radial Distortion Invariant Factorization for Structure from Motion", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200f886/1BmFAFvMMec", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300b062", "title": "Revisiting Radial Distortion Absolute Pose", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300b062/1hVlRpT15wA", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900o4571", "title": "A Quasiconvex Formulation for Radial Cameras", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900o4571/1yeMhdJcGWY", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1hQqfuoOyHu", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "acronym": "iccv", "groupId": "1000149", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1hVlRpT15wA", "doi": "10.1109/ICCV.2019.00115", "title": "Revisiting Radial Distortion Absolute Pose", "normalizedTitle": "Revisiting Radial Distortion Absolute Pose", "abstract": "To model radial distortion there are two main approaches; either the image points are undistorted such that they correspond to pinhole projections, or the pinhole projections are distorted such that they align with the image measurements. Depending on the application, either of the two approaches can be more suitable. For example, distortion models are commonly used in Structure-from-Motion since they simplify measuring the reprojection error in images. Surprisingly, all previous minimal solvers for pose estimation with radial distortion use undistortion models. In this paper we aim to fill this gap in the literature by proposing the first minimal solvers which can jointly estimate distortion models together with camera pose. We present a general approach which can handle rational models of arbitrary degree for both distortion and undistortion.", "abstracts": [ { "abstractType": "Regular", "content": "To model radial distortion there are two main approaches; either the image points are undistorted such that they correspond to pinhole projections, or the pinhole projections are distorted such that they align with the image measurements. Depending on the application, either of the two approaches can be more suitable. For example, distortion models are commonly used in Structure-from-Motion since they simplify measuring the reprojection error in images. Surprisingly, all previous minimal solvers for pose estimation with radial distortion use undistortion models. 
In this paper we aim to fill this gap in the literature by proposing the first minimal solvers which can jointly estimate distortion models together with camera pose. We present a general approach which can handle rational models of arbitrary degree for both distortion and undistortion.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "To model radial distortion there are two main approaches; either the image points are undistorted such that they correspond to pinhole projections, or the pinhole projections are distorted such that they align with the image measurements. Depending on the application, either of the two approaches can be more suitable. For example, distortion models are commonly used in Structure-from-Motion since they simplify measuring the reprojection error in images. Surprisingly, all previous minimal solvers for pose estimation with radial distortion use undistortion models. In this paper we aim to fill this gap in the literature by proposing the first minimal solvers which can jointly estimate distortion models together with camera pose. 
We present a general approach which can handle rational models of arbitrary degree for both distortion and undistortion.", "fno": "480300b062", "keywords": [ "Cameras", "Computational Geometry", "Image Motion Analysis", "Pose Estimation", "Pinhole Projections", "Image Measurements", "Distortion Models", "Minimal Solvers", "Undistortion Models", "Rational Models", "Image Points", "Radial Distortion Absolute Pose", "Structure From Motion", "Reprojection Error", "Pose Estimation", "Camera Pose", "Nonlinear Distortion", "Cameras", "Optical Distortion", "Lenses", "Mathematical Model", "Calibration" ], "authors": [ { "affiliation": "ETH Zurich", "fullName": "Viktor Larsson", "givenName": "Viktor", "surname": "Larsson", "__typename": "ArticleAuthorType" }, { "affiliation": "Chalmers University of Technology", "fullName": "Torsten Sattler", "givenName": "Torsten", "surname": "Sattler", "__typename": "ArticleAuthorType" }, { "affiliation": "Czech Technical University in Prague", "fullName": "Zuzana Kukelova", "givenName": "Zuzana", "surname": "Kukelova", "__typename": "ArticleAuthorType" }, { "affiliation": "ETH Zurich / Microsoft", "fullName": "Marc Pollefeys", "givenName": "Marc", "surname": "Pollefeys", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-10-01T00:00:00", "pubType": "proceedings", "pages": "1062-1071", "year": "2019", "issn": null, "isbn": "978-1-7281-4803-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "480300b052", "articleId": "1hVlAZv5zfG", "__typename": "AdjacentArticleType" }, "next": { "fno": "480300b072", "articleId": "1hVleoUQBmU", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2017/1032/0/1032d867", "title": "Parameter-Free Lens Distortion Calibration of Central Cameras", 
"doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032d867/12OmNB1eJyc", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2013/4989/0/4989b368", "title": "Radial Distortion Self-Calibration", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2013/4989b368/12OmNvqmUGe", "parentPublication": { "id": "proceedings/cvpr/2013/4989/0", "title": "2013 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isme/2010/7669/2/05573869", "title": "A New Camera Calibration Method Based on Two Stages Distortion Model", "doi": null, "abstractUrl": "/proceedings-article/isme/2010/05573869/12OmNwBT1lH", "parentPublication": { "id": "proceedings/isme/2010/7669/2", "title": "2010 International Conference of Information Science and Management Engineering. 
ISME 2010", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2013/4795/0/06549353", "title": "A robust camera-based method for optical distortion calibration of head-mounted displays", "doi": null, "abstractUrl": "/proceedings-article/vr/2013/06549353/12OmNwvVrHy", "parentPublication": { "id": "proceedings/vr/2013/4795/0", "title": "2013 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2000/0750/1/07501423", "title": "Violating Rotating Camera Geometry: The Effect of Radial Distortion on Self-Calibration", "doi": null, "abstractUrl": "/proceedings-article/icpr/2000/07501423/12OmNyQYtkT", "parentPublication": { "id": "proceedings/icpr/2000/0750/1", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2018/3788/0/08545218", "title": "Radial Lens Distortion Correction by Adding a Weight Layer with Inverted Foveal Models to Convolutional Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/icpr/2018/08545218/17D45VObpQx", "parentPublication": { "id": "proceedings/icpr/2018/3788/0", "title": "2018 24th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dasc-picom-datacom-cyberscitech/2017/1956/0/08328476", "title": "Robust Radial Distortion Correction from a Single Image", "doi": null, "abstractUrl": "/proceedings-article/dasc-picom-datacom-cyberscitech/2017/08328476/17D45VTRoD1", "parentPublication": { "id": "proceedings/dasc-picom-datacom-cyberscitech/2017/1956/0", "title": "2017 IEEE 15th Intl Conf on Dependable, Autonomic and Secure Computing, 15th Intl Conf on Pervasive Intelligence and Computing, 3rd Intl Conf on Big Data Intelligence and Computing and Cyber Science and Technology 
Congress(DASC/PiCom/DataCom/CyberSciTech)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000b993", "title": "Radially-Distorted Conjugate Translations", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000b993/17D45VsBU0T", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000c984", "title": "Camera Pose Estimation with Unknown Principal Point", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000c984/17D45XERmmu", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800h718", "title": "RDCFace: Radial Distortion Correction for Face Recognition", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800h718/1m3n9WusSUo", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1m3n9N02qgE", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1m3n9WusSUo", "doi": "10.1109/CVPR42600.2020.00774", "title": "RDCFace: Radial Distortion Correction for Face Recognition", "normalizedTitle": "RDCFace: Radial Distortion Correction for Face Recognition", "abstract": "The effects of radial lens distortion often appear in wide-angle cameras of surveillance and safeguard systems, which may severely degrade performances of previous face recognition algorithms. Traditional methods for radial lens distortion correction usually employ line features in scenarios that are not suitable for face images. In this paper, we propose a distortion-invariant face recognition system called RDCFace, which directly and only utilize the distorted images of faces, to alleviate the effects of radial lens distortion. RDCFace is an end-to-end trainable cascade network, which can learn rectification and alignment parameters to achieve a better face recognition performance without requiring supervision of facial landmarks and distortion parameters. We design sequential spatial transformer layers to optimize the correction, alignment, and recognition modules jointly. The feasibility of our method comes from implicitly using the statistics of the layout of face features learned from the large-scale face data. Extensive experiments indicate that our method is distortion robust and gains significant improvements on LFW, YTF, CFP, and RadialFace, a real distorted face benchmark compared with state-of-the-art methods.", "abstracts": [ { "abstractType": "Regular", "content": "The effects of radial lens distortion often appear in wide-angle cameras of surveillance and safeguard systems, which may severely degrade performances of previous face recognition algorithms. 
Traditional methods for radial lens distortion correction usually employ line features in scenarios that are not suitable for face images. In this paper, we propose a distortion-invariant face recognition system called RDCFace, which directly and only utilize the distorted images of faces, to alleviate the effects of radial lens distortion. RDCFace is an end-to-end trainable cascade network, which can learn rectification and alignment parameters to achieve a better face recognition performance without requiring supervision of facial landmarks and distortion parameters. We design sequential spatial transformer layers to optimize the correction, alignment, and recognition modules jointly. The feasibility of our method comes from implicitly using the statistics of the layout of face features learned from the large-scale face data. Extensive experiments indicate that our method is distortion robust and gains significant improvements on LFW, YTF, CFP, and RadialFace, a real distorted face benchmark compared with state-of-the-art methods.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The effects of radial lens distortion often appear in wide-angle cameras of surveillance and safeguard systems, which may severely degrade performances of previous face recognition algorithms. Traditional methods for radial lens distortion correction usually employ line features in scenarios that are not suitable for face images. In this paper, we propose a distortion-invariant face recognition system called RDCFace, which directly and only utilize the distorted images of faces, to alleviate the effects of radial lens distortion. RDCFace is an end-to-end trainable cascade network, which can learn rectification and alignment parameters to achieve a better face recognition performance without requiring supervision of facial landmarks and distortion parameters. 
We design sequential spatial transformer layers to optimize the correction, alignment, and recognition modules jointly. The feasibility of our method comes from implicitly using the statistics of the layout of face features learned from the large-scale face data. Extensive experiments indicate that our method is distortion robust and gains significant improvements on LFW, YTF, CFP, and RadialFace, a real distorted face benchmark compared with state-of-the-art methods.", "fno": "716800h718", "keywords": [ "Cameras", "Face Recognition", "Image Sequences", "Learning Artificial Intelligence", "RDC Face", "Face Recognition Algorithms", "Radial Lens Distortion Correction", "Face Images", "Distortion Invariant Face Recognition System", "Face Recognition Performance", "Facial Landmarks", "Large Scale Face Data", "Distorted Face Benchmark", "Trainable Cascade Network", "Face", "Distortion", "Face Recognition", "Lenses", "Image Edge Detection", "Cameras", "Robustness" ], "authors": [ { "affiliation": "Key Laboratory of Machine Perception (MOE), School of EECS, Peking University", "fullName": "He Zhao", "givenName": "He", "surname": "Zhao", "__typename": "ArticleAuthorType" }, { "affiliation": "Key Laboratory of Machine Perception (MOE), School of EECS, Peking University", "fullName": "Xianghua Ying", "givenName": "Xianghua", "surname": "Ying", "__typename": "ArticleAuthorType" }, { "affiliation": "Key Laboratory of Machine Perception (MOE), School of EECS, Peking University", "fullName": "Yongjie Shi", "givenName": "Yongjie", "surname": "Shi", "__typename": "ArticleAuthorType" }, { "affiliation": "Key Laboratory of Machine Perception (MOE), School of EECS, Peking University", "fullName": "Xin Tong", "givenName": "Xin", "surname": "Tong", "__typename": "ArticleAuthorType" }, { "affiliation": "Key Laboratory of Machine Perception (MOE), School of EECS, Peking University", "fullName": "Jingsi Wen", "givenName": "Jingsi", "surname": "Wen", "__typename": "ArticleAuthorType" }, { 
"affiliation": "Key Laboratory of Machine Perception (MOE), School of EECS, Peking University", "fullName": "Hongbin Zha", "givenName": "Hongbin", "surname": "Zha", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-06-01T00:00:00", "pubType": "proceedings", "pages": "7718-7727", "year": "2020", "issn": null, "isbn": "978-1-7281-7168-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "716800h707", "articleId": "1m3nijIcYta", "__typename": "AdjacentArticleType" }, "next": { "fno": "716800h728", "articleId": "1m3osltAqTS", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iciap/2003/1948/0/19480182", "title": "A Hough Transform-Based Method for Radial Lens Distortion Correction", "doi": null, "abstractUrl": "/proceedings-article/iciap/2003/19480182/12OmNvs4vra", "parentPublication": { "id": "proceedings/iciap/2003/1948/0", "title": "Image Analysis and Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2015/6026/1/07163149", "title": "Correcting radial and perspective distortion by using face shape information", "doi": null, "abstractUrl": "/proceedings-article/fg/2015/07163149/12OmNyKJilp", "parentPublication": { "id": "proceedings/fg/2015/6026/5", "title": "2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391c345", "title": "On the Equivalence of Moving Entrance Pupil and Radial Distortion for Camera Calibration", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391c345/12OmNyshmIc", "parentPublication": { "id": 
"proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciicii/2015/8312/0/8312a217", "title": "Fisheye Lens Distortion Correction Based on an Ellipsoidal Function Model", "doi": null, "abstractUrl": "/proceedings-article/iciicii/2015/8312a217/12OmNzVXNRA", "parentPublication": { "id": "proceedings/iciicii/2015/8312/0", "title": "2015 International Conference on Industrial Informatics - Computing Technology, Intelligent Technology, Industrial Information Integration (ICIICII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aipr/2010/8833/0/05759696", "title": "A system and method for auto-correction of first order lens distortion", "doi": null, "abstractUrl": "/proceedings-article/aipr/2010/05759696/12OmNzw8jc1", "parentPublication": { "id": "proceedings/aipr/2010/8833/0", "title": "2010 IEEE 39th Applied Imagery Pattern Recognition Workshop (AIPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2007/08/i1309", "title": "Parameter-Free Radial Distortion Correction with Center of Distortion Estimation", "doi": null, "abstractUrl": "/journal/tp/2007/08/i1309/13rRUxASuqt", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2018/3788/0/08545218", "title": "Radial Lens Distortion Correction by Adding a Weight Layer with Inverted Foveal Models to Convolutional Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/icpr/2018/08545218/17D45VObpQx", "parentPublication": { "id": "proceedings/icpr/2018/3788/0", "title": "2018 24th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/dasc-picom-datacom-cyberscitech/2017/1956/0/08328476", "title": "Robust Radial Distortion Correction from a Single Image", "doi": null, "abstractUrl": "/proceedings-article/dasc-picom-datacom-cyberscitech/2017/08328476/17D45VTRoD1", "parentPublication": { "id": "proceedings/dasc-picom-datacom-cyberscitech/2017/1956/0", "title": "2017 IEEE 15th Intl Conf on Dependable, Autonomic and Secure Computing, 15th Intl Conf on Pervasive Intelligence and Computing, 3rd Intl Conf on Big Data Intelligence and Computing and Cyber Science and Technology Congress(DASC/PiCom/DataCom/CyberSciTech)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2018/3788/0/08545037", "title": "Deep Learning-based Face Recognition and the Robustness to Perspective Distortion", "doi": null, "abstractUrl": "/proceedings-article/icpr/2018/08545037/17D45WHONrq", "parentPublication": { "id": "proceedings/icpr/2018/3788/0", "title": "2018 24th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300b062", "title": "Revisiting Radial Distortion Absolute Pose", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300b062/1hVlRpT15wA", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1wzs0vrjyWQ", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "acronym": "cvprw", "groupId": "1001809", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1yJYuu3ZLpK", "doi": "10.1109/CVPRW53098.2021.00406", "title": "Fast Solvers for Minimal Radial Distortion Relative Pose Problems", "normalizedTitle": "Fast Solvers for Minimal Radial Distortion Relative Pose Problems", "abstract": "In this paper we present a unified formulation for a large class of relative pose problems with radial distortion and varying calibration. For minimal cases, we show that one can eliminate the number of parameters down to one to three. The relative pose can then be expressed using varying calibration constraints on the fundamental matrix, with entries that are polynomial in the parameters. We can then apply standard techniques based on the action matrix and Sturm sequences to construct our solvers. This enables efficient solvers for a large class of relative pose problems with radial distortion, using a common framework. We evaluate a number of these solvers for robust two-view inlier and epipolar geometry estimation, used as minimal solvers in RANSAC.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper we present a unified formulation for a large class of relative pose problems with radial distortion and varying calibration. For minimal cases, we show that one can eliminate the number of parameters down to one to three. The relative pose can then be expressed using varying calibration constraints on the fundamental matrix, with entries that are polynomial in the parameters. We can then apply standard techniques based on the action matrix and Sturm sequences to construct our solvers. This enables efficient solvers for a large class of relative pose problems with radial distortion, using a common framework. 
We evaluate a number of these solvers for robust two-view inlier and epipolar geometry estimation, used as minimal solvers in RANSAC.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper we present a unified formulation for a large class of relative pose problems with radial distortion and varying calibration. For minimal cases, we show that one can eliminate the number of parameters down to one to three. The relative pose can then be expressed using varying calibration constraints on the fundamental matrix, with entries that are polynomial in the parameters. We can then apply standard techniques based on the action matrix and Sturm sequences to construct our solvers. This enables efficient solvers for a large class of relative pose problems with radial distortion, using a common framework. We evaluate a number of these solvers for robust two-view inlier and epipolar geometry estimation, used as minimal solvers in RANSAC.", "fno": "489900d663", "keywords": [ "Calibration", "Cameras", "Computational Geometry", "Computer Vision", "Geometry", "Image Matching", "Matrix Algebra", "Polynomials", "Pose Estimation", "Stereo Image Processing", "Fast Solvers", "Minimal Radial Distortion Relative Pose Problems", "Unified Formulation", "Varying Calibration", "Minimal Cases", "Calibration Constraints", "Efficient Solvers", "Minimal Solvers", "Geometry", "Computer Vision", "Conferences", "Estimation", "Tools", "Distortion", "Cameras" ], "authors": [ { "affiliation": "Lund University,Centre for Mathematical Sciences,Sweden", "fullName": "Magnus Oskarsson", "givenName": "Magnus", "surname": "Oskarsson", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvprw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-06-01T00:00:00", "pubType": "proceedings", "pages": "3663-3672", "year": "2021", "issn": null, "isbn": "978-1-6654-4899-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, 
"webExtras": [], "adjacentArticles": { "previous": { "fno": "489900d654", "articleId": "1yJYuHiC6sg", "__typename": "AdjacentArticleType" }, "next": { "fno": "489900d673", "articleId": "1yXsJuM7KBa", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2017/1032/0/1032c335", "title": "Making Minimal Solvers for Absolute Pose Estimation Compact and Robust", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032c335/12OmNAL3B8G", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2009/3992/0/05206756", "title": "Pose estimation with radial distortion and unknown focal length", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2009/05206756/12OmNqG0SQX", "parentPublication": { "id": "proceedings/cvpr/2009/3992/0", "title": "2009 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2014/5118/0/5118a033", "title": "Minimal Solvers for Relative Pose with a Single Unknown Radial Distortion", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2014/5118a033/12OmNrFkeSu", "parentPublication": { "id": "proceedings/cvpr/2014/5118/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457d605", "title": "A Clever Elimination Strategy for Efficient Minimal Solvers", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457d605/12OmNywfKFh", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "trans/tp/2011/12/ttp2011122410", "title": "A Minimal Solution to Radial Distortion Autocalibration", "doi": null, "abstractUrl": "/journal/tp/2011/12/ttp2011122410/13rRUwvBy9X", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2007/08/i1309", "title": "Parameter-Free Radial Distortion Correction with Center of Distortion Estimation", "doi": null, "abstractUrl": "/journal/tp/2007/08/i1309/13rRUxASuqt", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300l1809", "title": "Deep Single Image Camera Calibration With Radial Distortion", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300l1809/1gysbC2QjG8", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300b062", "title": "Revisiting Radial Distortion Absolute Pose", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300b062/1hVlRpT15wA", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2021/11/09086062", "title": "Minimal Solvers for Rectifying From Radially-Distorted Conjugate Translations", "doi": null, "abstractUrl": "/journal/tp/2021/11/09086062/1jyxuePMkQE", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2021/0477/0/047700b750", "title": "Efficient Real-Time Radial Distortion Correction for UAVs", "doi": null, "abstractUrl": "/proceedings-article/wacv/2021/047700b750/1uqGOZv9gly", "parentPublication": { "id": "proceedings/wacv/2021/0477/0", "title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNCf1Dpj", "title": "2014 16th International Symposium on Symbolic and Numeric Algorithms for Scientific Computing (SYNASC)", "acronym": "synasc", "groupId": "1001577", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNAlvHNI", "doi": "10.1109/SYNASC.2014.17", "title": "On Corank Two Edge-Bipartite Graphs and Simply Extended Euclidean Diagrams", "normalizedTitle": "On Corank Two Edge-Bipartite Graphs and Simply Extended Euclidean Diagrams", "abstract": "We continue the Coxeter spectral study of finite connected loop-free edge-bipartite graphs ?, with m+2 = 3 vertices (a class of signed graphs), started in [SIAM J. Discrete Math., 27 (2013), 827-854] by means of the complex Coxeter spectrum specc ? ? C and presented in our talks given in SYNASC12 and SYNASC13. Here, we study non-negative edge-bipartite graphs of corank two, in the sense that the symmetric Gram matrix G? ? Mm+2 (Z) of ? is positive semi-definite of rank m = 1. Extending each of the simply laced Euclidean diagrams Ãm, m = 1, ?m, m = 4, ?6, ?7, ?8 by one vertex, we construct a family of loop-free corank two diagrams Ã2m, ?2m, ?26, ?27, ?28 (called simply extended Euclidean diagrams) such that they classify all connected corank two loop-free edge-bipartite graphs ?, with m + 2 = 3 vertices, up to Z-congruence ? ~z ?'. Here ? ~z ?' means that G?' = Btr ·G?'·B, for some B ? Mm+2 (Z) such that det B = ±1. We present algorithms that generate all such edge-bipartite graphs of a given size m + 2 = 3, together with their Coxeter polynomials, and the reduced Coxeter numbers, using symbolic and numeric computer calculations in Python. Moreover, we prove that for any corank two connected loop-free edge-bipartite graph ?, with m + 2 = 3 vertices, there exists a simply extended Euclidean diagram D such that ? 
~z D.", "abstracts": [ { "abstractType": "Regular", "content": "We continue the Coxeter spectral study of finite connected loop-free edge-bipartite graphs ?, with m+2 = 3 vertices (a class of signed graphs), started in [SIAM J. Discrete Math., 27 (2013), 827-854] by means of the complex Coxeter spectrum specc ? ? C and presented in our talks given in SYNASC12 and SYNASC13. Here, we study non-negative edge-bipartite graphs of corank two, in the sense that the symmetric Gram matrix G? ? Mm+2 (Z) of ? is positive semi-definite of rank m = 1. Extending each of the simply laced Euclidean diagrams Ãm, m = 1, ?m, m = 4, ?6, ?7, ?8 by one vertex, we construct a family of loop-free corank two diagrams Ã2m, ?2m, ?26, ?27, ?28 (called simply extended Euclidean diagrams) such that they classify all connected corank two loop-free edge-bipartite graphs ?, with m + 2 = 3 vertices, up to Z-congruence ? ~z ?'. Here ? ~z ?' means that G?' = Btr ·G?'·B, for some B ? Mm+2 (Z) such that det B = ±1. We present algorithms that generate all such edge-bipartite graphs of a given size m + 2 = 3, together with their Coxeter polynomials, and the reduced Coxeter numbers, using symbolic and numeric computer calculations in Python. Moreover, we prove that for any corank two connected loop-free edge-bipartite graph ?, with m + 2 = 3 vertices, there exists a simply extended Euclidean diagram D such that ? ~z D.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We continue the Coxeter spectral study of finite connected loop-free edge-bipartite graphs ?, with m+2 = 3 vertices (a class of signed graphs), started in [SIAM J. Discrete Math., 27 (2013), 827-854] by means of the complex Coxeter spectrum specc ? ? C and presented in our talks given in SYNASC12 and SYNASC13. Here, we study non-negative edge-bipartite graphs of corank two, in the sense that the symmetric Gram matrix G? ? Mm+2 (Z) of ? is positive semi-definite of rank m = 1. 
Extending each of the simply laced Euclidean diagrams Ãm, m = 1, ?m, m = 4, ?6, ?7, ?8 by one vertex, we construct a family of loop-free corank two diagrams Ã2m, ?2m, ?26, ?27, ?28 (called simply extended Euclidean diagrams) such that they classify all connected corank two loop-free edge-bipartite graphs ?, with m + 2 = 3 vertices, up to Z-congruence ? ~z ?'. Here ? ~z ?' means that G?' = Btr ·G?'·B, for some B ? Mm+2 (Z) such that det B = ±1. We present algorithms that generate all such edge-bipartite graphs of a given size m + 2 = 3, together with their Coxeter polynomials, and the reduced Coxeter numbers, using symbolic and numeric computer calculations in Python. Moreover, we prove that for any corank two connected loop-free edge-bipartite graph ?, with m + 2 = 3 vertices, there exists a simply extended Euclidean diagram D such that ? ~z D.", "fno": "07034667", "keywords": [ "Zinc", "Vectors", "Symmetric Matrices", "Polynomials", "Labeling", "Bipartite Graph", "Kernel", "Inflation Algorithm", "Non Negative Bigraphs Of Corank Two", "Coxeter Spectral Analysis", "Euclidean Diagrams", "Z Congruence", "Quadratic Form" ], "authors": [ { "affiliation": null, "fullName": "Marcin Gasiorek", "givenName": "Marcin", "surname": "Gasiorek", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Daniel Simson", "givenName": "Daniel", "surname": "Simson", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Katarzyna Zajac", "givenName": "Katarzyna", "surname": "Zajac", "__typename": "ArticleAuthorType" } ], "idPrefix": "synasc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-09-01T00:00:00", "pubType": "proceedings", "pages": "66-73", "year": "2014", "issn": null, "isbn": "978-1-4799-8447-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07034666", "articleId": "12OmNxbmSCy", "__typename": 
"AdjacentArticleType" }, "next": { "fno": "07034668", "articleId": "12OmNrnJ6MT", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/synasc/2012/5026/0/06481016", "title": "On Computing Mesh Root Systems and the Isotropy Group for Simply-laced Dynkin Diagrams", "doi": null, "abstractUrl": "/proceedings-article/synasc/2012/06481016/12OmNBEpnEx", "parentPublication": { "id": "proceedings/synasc/2012/5026/0", "title": "2012 14th International Symposium on Symbolic and Numeric Algorithms for Scientific Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/synasc/2013/3035/0/06821139", "title": "On Coxeter Type Classification of Loop-Free Edge-Bipartite Graphs and Matrix Morsifications", "doi": null, "abstractUrl": "/proceedings-article/synasc/2013/06821139/12OmNCga1SH", "parentPublication": { "id": "proceedings/synasc/2013/3035/0", "title": "2013 15th International Symposium on Symbolic and Numeric Algorithms for Scientific Computing (SYNASC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/focs/2015/8191/0/8191b358", "title": "Interlacing Families IV: Bipartite Ramanujan Graphs of All Sizes", "doi": null, "abstractUrl": "/proceedings-article/focs/2015/8191b358/12OmNqBbHDT", "parentPublication": { "id": "proceedings/focs/2015/8191/0", "title": "2015 IEEE 56th Annual Symposium on Foundations of Computer Science (FOCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2011/0868/0/06004018", "title": "Drawing Semi-bipartite Graphs in Anchor+Matrix Style", "doi": null, "abstractUrl": "/proceedings-article/iv/2011/06004018/12OmNqJq4Ao", "parentPublication": { "id": "proceedings/iv/2011/0868/0", "title": "2011 15th International Conference on Information Visualisation", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/synasc/2013/3035/0/06821133", "title": "On Computing Non-negative Loop-Free Edge-Bipartite Graphs", "doi": null, "abstractUrl": "/proceedings-article/synasc/2013/06821133/12OmNvpNImh", "parentPublication": { "id": "proceedings/synasc/2013/3035/0", "title": "2013 15th International Symposium on Symbolic and Numeric Algorithms for Scientific Computing (SYNASC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/focs/2013/5135/0/5135a529", "title": "Interlacing Families I: Bipartite Ramanujan Graphs of All Degrees", "doi": null, "abstractUrl": "/proceedings-article/focs/2013/5135a529/12OmNyv7mfG", "parentPublication": { "id": "proceedings/focs/2013/5135/0", "title": "2013 IEEE 54th Annual Symposium on Foundations of Computer Science (FOCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csit/2015/7562/0/07358253", "title": "On interval edge-colorings of bipartite graphs", "doi": null, "abstractUrl": "/proceedings-article/csit/2015/07358253/12OmNzUPpCq", "parentPublication": { "id": "proceedings/csit/2015/7562/0", "title": "2015 Computer Science and Information Technologies (CSIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/synasc/2012/5026/0/06481054", "title": "On Coxeter Spectral Study of Posets and a Digraph Isomorphism Problem", "doi": null, "abstractUrl": "/proceedings-article/synasc/2012/06481054/12OmNzayNg2", "parentPublication": { "id": "proceedings/synasc/2012/5026/0", "title": "2012 14th International Symposium on Symbolic and Numeric Algorithms for Scientific Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itme/2018/7744/0/774400a020", "title": "A New Efficient Algorithm for Weighted Vertex Cover in Bipartite Graphs Based on a Dual Problem", "doi": null, "abstractUrl": 
"/proceedings-article/itme/2018/774400a020/17D45WIXbQY", "parentPublication": { "id": "proceedings/itme/2018/7744/0", "title": "2018 9th International Conference on Information Technology in Medicine and Education (ITME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/10/08423100", "title": "The Effect of Edge Bundling and Seriation on Sensemaking of Biclusters in Bipartite Graphs", "doi": null, "abstractUrl": "/journal/tg/2019/10/08423100/1d3e5UbWqis", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNqG0SXT", "title": "2014 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)", "acronym": "asonam", "groupId": "1002866", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNCw3z9d", "doi": "10.1109/ASONAM.2014.6921578", "title": "Indexing bipartite memberships in web graphs", "normalizedTitle": "Indexing bipartite memberships in web graphs", "abstract": "Massive bipartite graphs are ubiquitous in real world and have important applications in social networks, biological mechanisms, etc. Consider one billion plus people on Facebook making trillions of connections with millions of organizations. Such big social bipartite graphs are often very skewed and unbalanced, on which traditional indexing algorithms do not perform optimally. In this paper, we propose Arowana, a data-driven algorithm for indexing large unbalanced bipartite graphs. Arowana achieves a high-performance efficiency by building an index tree that incorporates the semantic affinity among unbalanced graphs. Arowana uses probabilistic data structures to minimize space overhead and optimize search. In the experiments, we show that Arowana exhibits significant performance improvements and reduces space overhead over traditional indexing techniques.", "abstracts": [ { "abstractType": "Regular", "content": "Massive bipartite graphs are ubiquitous in real world and have important applications in social networks, biological mechanisms, etc. Consider one billion plus people on Facebook making trillions of connections with millions of organizations. Such big social bipartite graphs are often very skewed and unbalanced, on which traditional indexing algorithms do not perform optimally. In this paper, we propose Arowana, a data-driven algorithm for indexing large unbalanced bipartite graphs. 
Arowana achieves a high-performance efficiency by building an index tree that incorporates the semantic affinity among unbalanced graphs. Arowana uses probabilistic data structures to minimize space overhead and optimize search. In the experiments, we show that Arowana exhibits significant performance improvements and reduces space overhead over traditional indexing techniques.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Massive bipartite graphs are ubiquitous in real world and have important applications in social networks, biological mechanisms, etc. Consider one billion plus people on Facebook making trillions of connections with millions of organizations. Such big social bipartite graphs are often very skewed and unbalanced, on which traditional indexing algorithms do not perform optimally. In this paper, we propose Arowana, a data-driven algorithm for indexing large unbalanced bipartite graphs. Arowana achieves a high-performance efficiency by building an index tree that incorporates the semantic affinity among unbalanced graphs. Arowana uses probabilistic data structures to minimize space overhead and optimize search. 
In the experiments, we show that Arowana exhibits significant performance improvements and reduces space overhead over traditional indexing techniques.", "fno": "06921578", "keywords": [ "Indexing", "Bipartite Graph", "Vegetation", "Facebook", "Buildings" ], "authors": [ { "affiliation": "Northwestern University, Evanston, IL USA", "fullName": "Yusheng Xie", "givenName": "Yusheng", "surname": "Xie", "__typename": "ArticleAuthorType" }, { "affiliation": "NEC Laboratories America, Princeton, NJ USA", "fullName": "Zhengzhang Chen", "givenName": "Zhengzhang", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": "Northwestern University, Evanston, IL USA", "fullName": "Diana Palsetia", "givenName": "Diana", "surname": "Palsetia", "__typename": "ArticleAuthorType" }, { "affiliation": "Northwestern University, Evanston, IL USA", "fullName": "Ankit Agrawal", "givenName": "Ankit", "surname": "Agrawal", "__typename": "ArticleAuthorType" }, { "affiliation": "Northwestern University, Evanston, IL USA", "fullName": "Alok Choudhary", "givenName": "Alok", "surname": "Choudhary", "__typename": "ArticleAuthorType" } ], "idPrefix": "asonam", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-08-01T00:00:00", "pubType": "proceedings", "pages": "166-173", "year": "2014", "issn": null, "isbn": "978-1-4799-5877-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06921577", "articleId": "12OmNBkxsvu", "__typename": "AdjacentArticleType" }, "next": { "fno": "06921579", "articleId": "12OmNwKGAlI", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/bigdata-congress/2014/5057/0/06906756", "title": "Rectangle Counting in Large Bipartite Graphs", "doi": null, "abstractUrl": "/proceedings-article/bigdata-congress/2014/06906756/12OmNBtCCLP", "parentPublication": { "id": 
"proceedings/bigdata-congress/2014/5057/0", "title": "2014 IEEE International Congress on Big Data (BigData Congress)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2011/0868/0/06004018", "title": "Drawing Semi-bipartite Graphs in Anchor+Matrix Style", "doi": null, "abstractUrl": "/proceedings-article/iv/2011/06004018/12OmNqJq4Ao", "parentPublication": { "id": "proceedings/iv/2011/0868/0", "title": "2011 15th International Conference on Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2010/7846/0/05571375", "title": "Drawing Clustered Bipartite Graphs in Multi-circular Style", "doi": null, "abstractUrl": "/proceedings-article/iv/2010/05571375/12OmNvEhfYC", "parentPublication": { "id": "proceedings/iv/2010/7846/0", "title": "2010 14th International Conference Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdcsw/2010/4079/0/4079a099", "title": "Structural Graph Indexing for Mining Complex Networks", "doi": null, "abstractUrl": "/proceedings-article/icdcsw/2010/4079a099/12OmNx965GQ", "parentPublication": { "id": "proceedings/icdcsw/2010/4079/0", "title": "2010 IEEE 30th International Conference on Distributed Computing Systems Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csit/2015/7562/0/07358253", "title": "On interval edge-colorings of bipartite graphs", "doi": null, "abstractUrl": "/proceedings-article/csit/2015/07358253/12OmNzUPpCq", "parentPublication": { "id": "proceedings/csit/2015/7562/0", "title": "2015 Computer Science and Information Technologies (CSIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isise/2008/3494/2/3494b471", "title": "Ordering Bipartite Graphs by their Minimal Energies", "doi": null, "abstractUrl": 
"/proceedings-article/isise/2008/3494b471/12OmNzWfoZp", "parentPublication": { "id": "proceedings/isise/2008/3494/2", "title": "2008 International Symposium on Information Science and Engineering (ISISE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvisp/2021/0770/0/077000a259", "title": "Complete Graphs and Bipartite Graphs in a Random Graph", "doi": null, "abstractUrl": "/proceedings-article/icvisp/2021/077000a259/1APq5FO8TBK", "parentPublication": { "id": "proceedings/icvisp/2021/0770/0", "title": "2021 5th International Conference on Vision, Image and Signal Processing (ICVISP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipdpsw/2022/9747/0/974700a304", "title": "Families of Butterfly Counting Algorithms for Bipartite Graphs", "doi": null, "abstractUrl": "/proceedings-article/ipdpsw/2022/974700a304/1Fu9kUXc6S4", "parentPublication": { "id": "proceedings/ipdpsw/2022/9747/0", "title": "2022 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icde/2022/0883/0/088300b887", "title": "Maximal Balanced Signed Biclique Enumeration in Signed Bipartite Graphs", "doi": null, "abstractUrl": "/proceedings-article/icde/2022/088300b887/1FwFrrnHATC", "parentPublication": { "id": "proceedings/icde/2022/0883/0", "title": "2022 IEEE 38th International Conference on Data Engineering (ICDE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icde/2021/9184/0/918400a085", "title": "Efficient and Effective Community Search on Large-scale Bipartite Graphs", "doi": null, "abstractUrl": "/proceedings-article/icde/2021/918400a085/1uGXp3tCuPK", "parentPublication": { "id": "proceedings/icde/2021/9184/0", "title": "2021 IEEE 37th International Conference on Data Engineering (ICDE)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxzMnU0", "title": "2011 15th International Conference on Information Visualisation", "acronym": "iv", "groupId": "1000370", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNqJq4Ao", "doi": "10.1109/IV.2011.24", "title": "Drawing Semi-bipartite Graphs in Anchor+Matrix Style", "normalizedTitle": "Drawing Semi-bipartite Graphs in Anchor+Matrix Style", "abstract": "A bipartite graph consists of a set of nodes that can be divided into two partitions such that no edge has both endpoints in the same partition. A semi-bipartite graph is a bipartite graph with edges in one partition. Anchored map is a graph drawing technique for bipartite graphs and provides aesthetically pleasing layouts of graphs with high readability by restricting the positions of nodes in a partition. For this research, the objects of the anchored map technique were extended to semi-bipartite graphs. A hybrid layout style of anchored maps and matrix representations are proposed, and an automatic drawing technique is shown. The proposed technique arranges the nodes in one partition on a circumference like the anchored map of bipartite graphs. It also divides nodes in the other partition with edges into clusters and represents them in the matrix representations to make it easy to see connective subsets.", "abstracts": [ { "abstractType": "Regular", "content": "A bipartite graph consists of a set of nodes that can be divided into two partitions such that no edge has both endpoints in the same partition. A semi-bipartite graph is a bipartite graph with edges in one partition. Anchored map is a graph drawing technique for bipartite graphs and provides aesthetically pleasing layouts of graphs with high readability by restricting the positions of nodes in a partition. For this research, the objects of the anchored map technique were extended to semi-bipartite graphs. 
A hybrid layout style of anchored maps and matrix representations are proposed, and an automatic drawing technique is shown. The proposed technique arranges the nodes in one partition on a circumference like the anchored map of bipartite graphs. It also divides nodes in the other partition with edges into clusters and represents them in the matrix representations to make it easy to see connective subsets.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A bipartite graph consists of a set of nodes that can be divided into two partitions such that no edge has both endpoints in the same partition. A semi-bipartite graph is a bipartite graph with edges in one partition. Anchored map is a graph drawing technique for bipartite graphs and provides aesthetically pleasing layouts of graphs with high readability by restricting the positions of nodes in a partition. For this research, the objects of the anchored map technique were extended to semi-bipartite graphs. A hybrid layout style of anchored maps and matrix representations are proposed, and an automatic drawing technique is shown. The proposed technique arranges the nodes in one partition on a circumference like the anchored map of bipartite graphs. 
It also divides nodes in the other partition with edges into clusters and represents them in the matrix representations to make it easy to see connective subsets.", "fno": "06004018", "keywords": [ "Data Visualisation", "Graph Theory", "Matrix Algebra", "Set Theory", "Technical Drawing", "Semibipartite Graph Drawing", "Anchor Matrix Style", "Anchored Map", "Matrix Representations", "Automatic Drawing Technique", "Bipartite Graph", "Layout", "Visualization", "Correlation", "Joining Processes", "Indium Tin Oxide", "Social Network Services", "Network Visualization", "Graph Drawing", "Semi Bipartite Graph", "Anchored Map", "Matrix Representation" ], "authors": [ { "affiliation": null, "fullName": "Kazuo Misue", "givenName": "Kazuo", "surname": "Misue", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Qi Zhou", "givenName": "Qi", "surname": "Zhou", "__typename": "ArticleAuthorType" } ], "idPrefix": "iv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-07-01T00:00:00", "pubType": "proceedings", "pages": "26-31", "year": "2011", "issn": "1550-6037", "isbn": "978-1-4577-0868-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06004017", "articleId": "12OmNArthca", "__typename": "AdjacentArticleType" }, "next": { "fno": "06004019", "articleId": "12OmNzzxutY", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/netcom/2009/3924/0/3924a046", "title": "K Bipartite Partitioning Algorithm for Channel Allocation Problem in Wireless Mesh Networks", "doi": null, "abstractUrl": "/proceedings-article/netcom/2009/3924a046/12OmNAObbz1", "parentPublication": { "id": "proceedings/netcom/2009/3924/0", "title": "Networks &amp; Communications, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/icdmw/2011/4409/0/4409a833", "title": "Visual Analysis of Bipartite Networks", "doi": null, "abstractUrl": "/proceedings-article/icdmw/2011/4409a833/12OmNBpEeXd", "parentPublication": { "id": "proceedings/icdmw/2011/4409/0", "title": "2011 IEEE 11th International Conference on Data Mining Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigdata-congress/2014/5057/0/06906756", "title": "Rectangle Counting in Large Bipartite Graphs", "doi": null, "abstractUrl": "/proceedings-article/bigdata-congress/2014/06906756/12OmNBtCCLP", "parentPublication": { "id": "proceedings/bigdata-congress/2014/5057/0", "title": "2014 IEEE International Congress on Big Data (BigData Congress)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2010/7846/0/05571375", "title": "Drawing Clustered Bipartite Graphs in Multi-circular Style", "doi": null, "abstractUrl": "/proceedings-article/iv/2010/05571375/12OmNvEhfYC", "parentPublication": { "id": "proceedings/iv/2010/7846/0", "title": "2010 14th International Conference Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/compsym/2016/3438/0/07858445", "title": "Total k-Domatic Partition on Some Classes of Graphs", "doi": null, "abstractUrl": "/proceedings-article/compsym/2016/07858445/12OmNviHKeH", "parentPublication": { "id": "proceedings/compsym/2016/3438/0", "title": "2016 International Computer Symposium (ICS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cyberc/2011/4557/0/4557a357", "title": "Unbiased Sampling of Bipartite Graph", "doi": null, "abstractUrl": "/proceedings-article/cyberc/2011/4557a357/12OmNy5zsnM", "parentPublication": { "id": "proceedings/cyberc/2011/4557/0", "title": "2011 International Conference on Cyber-Enabled Distributed Computing and Knowledge Discovery", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csit/2015/7562/0/07358253", "title": "On interval edge-colorings of bipartite graphs", "doi": null, "abstractUrl": "/proceedings-article/csit/2015/07358253/12OmNzUPpCq", "parentPublication": { "id": "proceedings/csit/2015/7562/0", "title": "2015 Computer Science and Information Technologies (CSIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isise/2008/3494/2/3494b471", "title": "Ordering Bipartite Graphs by their Minimal Energies", "doi": null, "abstractUrl": "/proceedings-article/isise/2008/3494b471/12OmNzWfoZp", "parentPublication": { "id": "proceedings/isise/2008/3494/2", "title": "2008 International Symposium on Information Science and Engineering (ISISE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/5555/01/09842309", "title": "Fast Flexible Bipartite Graph Model for Co-Clustering", "doi": null, "abstractUrl": "/journal/tk/5555/01/09842309/1FlLZY68ZQk", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2022/12/09366975", "title": "Approximately Counting Butterflies in Large Bipartite Graph Streams", "doi": null, "abstractUrl": "/journal/tk/2022/12/09366975/1rDQL4T5Wzm", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyRxFnO", "title": "2015 Computer Science and Information Technologies (CSIT)", "acronym": "csit", "groupId": "1810164", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNzUPpCq", "doi": "10.1109/CSITechnol.2015.7358253", "title": "On interval edge-colorings of bipartite graphs", "normalizedTitle": "On interval edge-colorings of bipartite graphs", "abstract": "An edge-coloring of a graph G with colors 1,…, t is an interval t-coloring if all colors are used, and the colors of edges incident to each vertex of G are distinct and form an interval of integers. A graph G is interval colorable if it has an interval t-coloring for some positive integer t. The set of interval colorable graphs is denoted by R. Recently, Toft has conjectured that all bipartite graphs with maximum degree at most 4 are interval colorable. In this paper we prove that: 1) if G is a bipartite graph with Δ(G) ≤ 4, then G□K2 N R; 2) if G is a bipartite graph with Δ (G) = 5 and without a vertex of degree 3, then G□K2 N R; 3) if G is a bipartite graph with Δ(G) = 6 and it has a 2-factor, then G□K2 N N. In 1999, Giaro using computer-aided methods showed that all bipartite graphs on at most 14 vertices are interval colorable. On the other hand, the smallest known examples of interval non-colorable bipartite graphs have 19 vertices. In this paper we also observe that several classes of bipartite graphs of small order have an interval coloring. In particular, we show that all bipartite graphs on 15 vertices are interval colorable.", "abstracts": [ { "abstractType": "Regular", "content": "An edge-coloring of a graph G with colors 1,…, t is an interval t-coloring if all colors are used, and the colors of edges incident to each vertex of G are distinct and form an interval of integers. A graph G is interval colorable if it has an interval t-coloring for some positive integer t. 
The set of interval colorable graphs is denoted by R. Recently, Toft has conjectured that all bipartite graphs with maximum degree at most 4 are interval colorable. In this paper we prove that: 1) if G is a bipartite graph with Δ(G) ≤ 4, then G□K2 N R; 2) if G is a bipartite graph with Δ (G) = 5 and without a vertex of degree 3, then G□K2 N R; 3) if G is a bipartite graph with Δ(G) = 6 and it has a 2-factor, then G□K2 N N. In 1999, Giaro using computer-aided methods showed that all bipartite graphs on at most 14 vertices are interval colorable. On the other hand, the smallest known examples of interval non-colorable bipartite graphs have 19 vertices. In this paper we also observe that several classes of bipartite graphs of small order have an interval coloring. In particular, we show that all bipartite graphs on 15 vertices are interval colorable.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "An edge-coloring of a graph G with colors 1,…, t is an interval t-coloring if all colors are used, and the colors of edges incident to each vertex of G are distinct and form an interval of integers. A graph G is interval colorable if it has an interval t-coloring for some positive integer t. The set of interval colorable graphs is denoted by R. Recently, Toft has conjectured that all bipartite graphs with maximum degree at most 4 are interval colorable. In this paper we prove that: 1) if G is a bipartite graph with Δ(G) ≤ 4, then G□K2 N R; 2) if G is a bipartite graph with Δ (G) = 5 and without a vertex of degree 3, then G□K2 N R; 3) if G is a bipartite graph with Δ(G) = 6 and it has a 2-factor, then G□K2 N N. In 1999, Giaro using computer-aided methods showed that all bipartite graphs on at most 14 vertices are interval colorable. On the other hand, the smallest known examples of interval non-colorable bipartite graphs have 19 vertices. In this paper we also observe that several classes of bipartite graphs of small order have an interval coloring. 
In particular, we show that all bipartite graphs on 15 vertices are interval colorable.", "fno": "07358253", "keywords": [ "Bipartite Graph", "Color", "Informatics", "Hypercubes", "Electronic Mail", "5 G Mobile Communication", "Computer Experiment", "Edge Coloring", "Interval Edge Coloring", "Near Interval Coloring", "Bipartite Graph", "Biregular Bipartite Graph", "Hypercube" ], "authors": [ { "affiliation": "Institute for Informatics and Automation Problems of NAS RA, 0014, Yerevan, Armenia", "fullName": "Petros Petrosyan", "givenName": "Petros", "surname": "Petrosyan", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Informatics and Applied Mathematics, Yerevan State University, 0025, Yerevan, Armenia", "fullName": "Hrant Khachatrian", "givenName": "Hrant", "surname": "Khachatrian", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Informatics and Applied Mathematics, Yerevan State University, 0025, Yerevan, Armenia", "fullName": "Tigran Mamikonyan", "givenName": "Tigran", "surname": "Mamikonyan", "__typename": "ArticleAuthorType" } ], "idPrefix": "csit", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-09-01T00:00:00", "pubType": "proceedings", "pages": "71-76", "year": "2015", "issn": null, "isbn": "978-1-4673-7562-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07358252", "articleId": "12OmNz61du0", "__typename": "AdjacentArticleType" }, "next": { "fno": "07358254", "articleId": "12OmNx76TF3", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/synasc/2014/8447/0/07034667", "title": "On Corank Two Edge-Bipartite Graphs and Simply Extended Euclidean Diagrams", "doi": null, "abstractUrl": "/proceedings-article/synasc/2014/07034667/12OmNAlvHNI", "parentPublication": { "id": "proceedings/synasc/2014/8447/0", 
"title": "2014 16th International Symposium on Symbolic and Numeric Algorithms for Scientific Computing (SYNASC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/asonam/2014/5877/0/06921578", "title": "Indexing bipartite memberships in web graphs", "doi": null, "abstractUrl": "/proceedings-article/asonam/2014/06921578/12OmNCw3z9d", "parentPublication": { "id": "proceedings/asonam/2014/5877/0", "title": "2014 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/is3c/2012/4655/0/4655a064", "title": "An Algorithm for Determining Critical Bipartite Graphs", "doi": null, "abstractUrl": "/proceedings-article/is3c/2012/4655a064/12OmNqzcvJt", "parentPublication": { "id": "proceedings/is3c/2012/4655/0", "title": "Computer, Consumer and Control, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dsc/2016/1192/0/1192a143", "title": "Constructions of Uniquely 3-Colorable Graphs", "doi": null, "abstractUrl": "/proceedings-article/dsc/2016/1192a143/12OmNyfdOLT", "parentPublication": { "id": "proceedings/dsc/2016/1192/0", "title": "2016 IEEE First International Conference on Data Science in Cyberspace (DSC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itng/2007/2776/0/04151818", "title": "On the Dominator Colorings in Bipartite Graphs", "doi": null, "abstractUrl": "/proceedings-article/itng/2007/04151818/12OmNyoAA28", "parentPublication": { "id": "proceedings/itng/2007/2776/0", "title": "2007 4th International Conference on Information Technology New Generations", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csit/2013/2460/0/06710340", "title": "On interval edge-colorings of complete tripartite 
graphs", "doi": null, "abstractUrl": "/proceedings-article/csit/2013/06710340/12OmNz2kqpO", "parentPublication": { "id": "proceedings/csit/2013/2460/0", "title": "2013 Computer Science and Information Technologies (CSIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cse-euc/2017/3220/1/08005931", "title": "Equitable Neighbor Sum Distinguishing Edge Colorings of Some Graphs", "doi": null, "abstractUrl": "/proceedings-article/cse-euc/2017/08005931/17D45VN31gY", "parentPublication": { "id": "proceedings/cse-euc/2017/3220/1", "title": "2017 IEEE International Conference on Computational Science and Engineering (CSE) and IEEE International Conference on Embedded and Ubiquitous Computing (EUC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvisp/2021/0770/0/077000a259", "title": "Complete Graphs and Bipartite Graphs in a Random Graph", "doi": null, "abstractUrl": "/proceedings-article/icvisp/2021/077000a259/1APq5FO8TBK", "parentPublication": { "id": "proceedings/icvisp/2021/0770/0", "title": "2021 5th International Conference on Vision, Image and Signal Processing (ICVISP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icde/2022/0883/0/088300b887", "title": "Maximal Balanced Signed Biclique Enumeration in Signed Bipartite Graphs", "doi": null, "abstractUrl": "/proceedings-article/icde/2022/088300b887/1FwFrrnHATC", "parentPublication": { "id": "proceedings/icde/2022/0883/0", "title": "2022 IEEE 38th International Conference on Data Engineering (ICDE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipdpsw/2020/7445/0/09150381", "title": "Kronecker Graph Generation with Ground Truth for 4-Cycles and Dense Structure in Bipartite Graphs", "doi": null, "abstractUrl": "/proceedings-article/ipdpsw/2020/09150381/1lPGDwFrzFe", "parentPublication": { "id": 
"proceedings/ipdpsw/2020/7445/0", "title": "2020 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBqMDoJ", "title": "2008 International Symposium on Information Science and Engineering (ISISE)", "acronym": "isise", "groupId": "1002561", "volume": "2", "displayVolume": "2", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNzWfoZp", "doi": "10.1109/ISISE.2008.33", "title": "Ordering Bipartite Graphs by their Minimal Energies", "normalizedTitle": "Ordering Bipartite Graphs by their Minimal Energies", "abstract": "The energy of a graph is defined as the sum of the absolute values of its eigenvalues. Let be the class of connected bipartite graphs with exactly two vertex-disjoint cycles. The graphs with minimal energies in is determined by [18]. In this paper, we determine the second to sixth graphs with minimal energy in the above ordering.", "abstracts": [ { "abstractType": "Regular", "content": "The energy of a graph is defined as the sum of the absolute values of its eigenvalues. Let be the class of connected bipartite graphs with exactly two vertex-disjoint cycles. The graphs with minimal energies in is determined by [18]. In this paper, we determine the second to sixth graphs with minimal energy in the above ordering.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The energy of a graph is defined as the sum of the absolute values of its eigenvalues. Let be the class of connected bipartite graphs with exactly two vertex-disjoint cycles. The graphs with minimal energies in is determined by [18]. 
In this paper, we determine the second to sixth graphs with minimal energy in the above ordering.", "fno": "3494b471", "keywords": [ "Energy", "Eigenvalues", "Bipartite Graph" ], "authors": [ { "affiliation": null, "fullName": "Fuyi Wei", "givenName": "Fuyi", "surname": "Wei", "__typename": "ArticleAuthorType" } ], "idPrefix": "isise", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-12-01T00:00:00", "pubType": "proceedings", "pages": "471-474", "year": "2008", "issn": null, "isbn": "978-0-7695-3494-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3494b467", "articleId": "12OmNxxNbXE", "__typename": "AdjacentArticleType" }, "next": { "fno": "3494b475", "articleId": "12OmNzwpUgH", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icdm/2013/5108/0/5108a389", "title": "BIG-ALIGN: Fast Bipartite Graph Alignment", "doi": null, "abstractUrl": "/proceedings-article/icdm/2013/5108a389/12OmNBhHtaH", "parentPublication": { "id": "proceedings/icdm/2013/5108/0", "title": "2013 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/asonam/2014/5877/0/06921578", "title": "Indexing bipartite memberships in web graphs", "doi": null, "abstractUrl": "/proceedings-article/asonam/2014/06921578/12OmNCw3z9d", "parentPublication": { "id": "proceedings/asonam/2014/5877/0", "title": "2014 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/focs/2015/8191/0/8191b358", "title": "Interlacing Families IV: Bipartite Ramanujan Graphs of All Sizes", "doi": null, "abstractUrl": "/proceedings-article/focs/2015/8191b358/12OmNqBbHDT", 
"parentPublication": { "id": "proceedings/focs/2015/8191/0", "title": "2015 IEEE 56th Annual Symposium on Foundations of Computer Science (FOCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2011/0394/0/05995567", "title": "Exhaustive family of energies minimizable exactly by a graph cut", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2011/05995567/12OmNwkR5Bj", "parentPublication": { "id": "proceedings/cvpr/2011/0394/0", "title": "CVPR 2011", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cyberc/2011/4557/0/4557a357", "title": "Unbiased Sampling of Bipartite Graph", "doi": null, "abstractUrl": "/proceedings-article/cyberc/2011/4557a357/12OmNy5zsnM", "parentPublication": { "id": "proceedings/cyberc/2011/4557/0", "title": "2011 International Conference on Cyber-Enabled Distributed Computing and Knowledge Discovery", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ispan/2002/1579/0/15790185", "title": "Interconnection Networks and Their Eigenvalues", "doi": null, "abstractUrl": "/proceedings-article/ispan/2002/15790185/12OmNyFCw0w", "parentPublication": { "id": "proceedings/ispan/2002/1579/0", "title": "Proceedings 2002 International Symposium on Parallel Architectures, Algorithms, and Networks", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/focs/2013/5135/0/5135a529", "title": "Interlacing Families I: Bipartite Ramanujan Graphs of All Degrees", "doi": null, "abstractUrl": "/proceedings-article/focs/2013/5135a529/12OmNyv7mfG", "parentPublication": { "id": "proceedings/focs/2013/5135/0", "title": "2013 IEEE 54th Annual Symposium on Foundations of Computer Science (FOCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csit/2015/7562/0/07358253", "title": "On interval edge-colorings of 
bipartite graphs", "doi": null, "abstractUrl": "/proceedings-article/csit/2015/07358253/12OmNzUPpCq", "parentPublication": { "id": "proceedings/csit/2015/7562/0", "title": "2015 Computer Science and Information Technologies (CSIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cisim/2007/2894/0/28940087", "title": "On Toeplitz Matrices Minimal Eigenvalues in Moving Object Description", "doi": null, "abstractUrl": "/proceedings-article/cisim/2007/28940087/12OmNzV70lB", "parentPublication": { "id": "proceedings/cisim/2007/2894/0", "title": "2007 6th International Conference on Computer Information Systems and Industrial Management Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icde/2022/0883/0/088300b887", "title": "Maximal Balanced Signed Biclique Enumeration in Signed Bipartite Graphs", "doi": null, "abstractUrl": "/proceedings-article/icde/2022/088300b887/1FwFrrnHATC", "parentPublication": { "id": "proceedings/icde/2022/0883/0", "title": "2022 IEEE 38th International Conference on Data Engineering (ICDE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKirV", "title": "2018 9th International Conference on Information Technology in Medicine and Education (ITME)", "acronym": "itme", "groupId": "1002567", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "17D45WIXbQY", "doi": "10.1109/ITME.2018.00016", "title": "A New Efficient Algorithm for Weighted Vertex Cover in Bipartite Graphs Based on a Dual Problem", "normalizedTitle": "A New Efficient Algorithm for Weighted Vertex Cover in Bipartite Graphs Based on a Dual Problem", "abstract": "The (un-weighted) vertex cover problem in general graphs is a classical NP-hard problem, but it is polynomial time solvable in bipartite graphs. This paper considers two combinatorial optimization problems. One is the weighted vertex cover problem and the other is the so-called maximum edge packing problem. We proved that in bipartite graphs, maximum edge packing problem can be viewed as the dual of the weighted vertex cover problem, and hence these two problems are polynomial time solvable. We explored the relationships between these two problems in bipartite graphs and some structural results are obtained accordingly. Furthermore, a new efficient algorithm for the weighted vertex cover problem in bipartite graphs is also derived. Our method generalized some previous algorithms for un-weighted vertex cover in bipartite graphs.", "abstracts": [ { "abstractType": "Regular", "content": "The (un-weighted) vertex cover problem in general graphs is a classical NP-hard problem, but it is polynomial time solvable in bipartite graphs. This paper considers two combinatorial optimization problems. One is the weighted vertex cover problem and the other is the so-called maximum edge packing problem. We proved that in bipartite graphs, maximum edge packing problem can be viewed as the dual of the weighted vertex cover problem, and hence these two problems are polynomial time solvable. 
We explored the relationships between these two problems in bipartite graphs and some structural results are obtained accordingly. Furthermore, a new efficient algorithm for the weighted vertex cover problem in bipartite graphs is also derived. Our method generalized some previous algorithms for un-weighted vertex cover in bipartite graphs.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The (un-weighted) vertex cover problem in general graphs is a classical NP-hard problem, but it is polynomial time solvable in bipartite graphs. This paper considers two combinatorial optimization problems. One is the weighted vertex cover problem and the other is the so-called maximum edge packing problem. We proved that in bipartite graphs, maximum edge packing problem can be viewed as the dual of the weighted vertex cover problem, and hence these two problems are polynomial time solvable. We explored the relationships between these two problems in bipartite graphs and some structural results are obtained accordingly. Furthermore, a new efficient algorithm for the weighted vertex cover problem in bipartite graphs is also derived. 
Our method generalized some previous algorithms for un-weighted vertex cover in bipartite graphs.", "fno": "774400a020", "keywords": [ "Bipartite Graph", "Optimization", "NP Hard Problem", "Particle Separators", "Approximation Algorithms", "Information Technology", "Education", "Matching", "Vertex Cover", "Edge Packing", "Polynomial Time Algorithm", "Dual Problem" ], "authors": [ { "affiliation": null, "fullName": "Yujiao Zhang", "givenName": "Yujiao", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Xiao Duan", "givenName": "Xiao", "surname": "Duan", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Xuerong Yue", "givenName": "Xuerong", "surname": "Yue", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Zhibin Chen", "givenName": "Zhibin", "surname": "Chen", "__typename": "ArticleAuthorType" } ], "idPrefix": "itme", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-10-01T00:00:00", "pubType": "proceedings", "pages": "20-23", "year": "2018", "issn": null, "isbn": "978-1-5386-7744-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "774400a015", "articleId": "17D45XeKgo5", "__typename": "AdjacentArticleType" }, "next": { "fno": "774400a024", "articleId": "17D45WcjjRn", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icis/2016/0806/0/07550782", "title": "A fast heuristic for the minimum weight vertex cover problem", "doi": null, "abstractUrl": "/proceedings-article/icis/2016/07550782/12OmNAle6Xn", "parentPublication": { "id": "proceedings/icis/2016/0806/0", "title": "2016 IEEE/ACIS 15th International Conference on Computer and Information Science (ICIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/focs/2014/6517/0/6517a130", "title": "Complexity of Counting Subgraphs: Only the Boundedness of the Vertex-Cover Number Counts", "doi": null, "abstractUrl": "/proceedings-article/focs/2014/6517a130/12OmNxAlzZT", "parentPublication": { "id": "proceedings/focs/2014/6517/0", "title": "2014 IEEE 55th Annual Symposium on Foundations of Computer Science (FOCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ispa/2008/3471/0/3471a301", "title": "Broadcasting in Weighted-Vertex Graphs", "doi": null, "abstractUrl": "/proceedings-article/ispa/2008/3471a301/12OmNxzuMLb", "parentPublication": { "id": "proceedings/ispa/2008/3471/0", "title": "2008 IEEE International Symposium on Parallel and Distributed Processing with Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csit/2015/7562/0/07358253", "title": "On interval edge-colorings of bipartite graphs", "doi": null, "abstractUrl": "/proceedings-article/csit/2015/07358253/12OmNzUPpCq", "parentPublication": { "id": "proceedings/csit/2015/7562/0", "title": "2015 Computer Science and Information Technologies (CSIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isise/2008/3494/2/3494b471", "title": "Ordering Bipartite Graphs by their Minimal Energies", "doi": null, "abstractUrl": "/proceedings-article/isise/2008/3494b471/12OmNzWfoZp", "parentPublication": { "id": "proceedings/isise/2008/3494/2", "title": "2008 International Symposium on Information Science and Engineering (ISISE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ictai/2016/4459/0/4459a686", "title": "Random Walk in Large Real-World Graphs for Finding Smaller Vertex Cover", "doi": null, "abstractUrl": "/proceedings-article/ictai/2016/4459a686/12OmNzlUKEI", "parentPublication": { "id": "proceedings/ictai/2016/4459/0", "title": "2016 
IEEE 28th International Conference on Tools with Artificial Intelligence (ICTAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/5555/01/09889176", "title": "Finding the Maximum <inline-formula><tex-math notation=\"LaTeX\">Z_$k$_Z</tex-math></inline-formula>-Balanced Biclique on Weighted Bipartite Graphs", "doi": null, "abstractUrl": "/journal/tk/5555/01/09889176/1GDrnzNt5Re", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/focs/2020/9621/0/962100a412", "title": "Edge-Weighted Online Bipartite Matching", "doi": null, "abstractUrl": "/proceedings-article/focs/2020/962100a412/1qyxvL8VZcc", "parentPublication": { "id": "proceedings/focs/2020/9621/0", "title": "2020 IEEE 61st Annual Symposium on Foundations of Computer Science (FOCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icceic/2020/8573/0/857300a145", "title": "An Asynchronous Game-based algorithm to the Weighted Vertex Cover of Networks", "doi": null, "abstractUrl": "/proceedings-article/icceic/2020/857300a145/1rCgtkrutBm", "parentPublication": { "id": "proceedings/icceic/2020/8573/0", "title": "2020 International Conference on Computer Engineering and Intelligent Control (ICCEIC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icde/2021/9184/0/918400a085", "title": "Efficient and Effective Community Search on Large-scale Bipartite Graphs", "doi": null, "abstractUrl": "/proceedings-article/icde/2021/918400a085/1uGXp3tCuPK", "parentPublication": { "id": "proceedings/icde/2021/9184/0", "title": "2021 IEEE 37th International Conference on Data Engineering (ICDE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }