data
dict
{ "proceeding": { "id": "1cI6akLvAuQ", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cJ1boUCpNK", "doi": "10.1109/VR.2019.8798063", "title": "I Got Your Point: An Investigation of Pointing Cues in a Spherical Fish Tank Virtual Reality Display", "normalizedTitle": "I Got Your Point: An Investigation of Pointing Cues in a Spherical Fish Tank Virtual Reality Display", "abstract": "Pointing is a fundamental building block in human communication. While it is ubiquitous in our daily interactions within the real world, it is difficult to precisely interpret a virtual agent's pointing direction to the physical world, considering its complex and subtle gesture cues, such as the movements of the human hand and head. Fish Tank Virtual Reality (FTVR) display has the potential to provide accurate pointing cues as it creates a compelling 3D spatial effect by rendering perspective-corrected vision. In this paper, we conducted a study with pointing cues of three levels (Head-only, Hand-only, and Hand+Head) to evaluate how the head and hand gesture cues affect observers' performance in interpretation of where a virtual agent is pointing in a spherical FTVR display. The results showed that the hand gesture significantly helps people interpret the pointing both accurately and quickly for fine pointing (15°), with 19.4% higher accuracy and 1.42 seconds faster than the head cue. The combination of the head and hand yielded a small improvement on the accuracy (4.4%) with even slightly longer time (0.38 seconds) compared to the hand-only cue. However, for coarse pointing (30°), head cue appears to be sufficient with the accuracy of 90.2%. 
The result of this study provides guidelines on cues selection for designing pointing in the virtual environment.", "abstracts": [ { "abstractType": "Regular", "content": "Pointing is a fundamental building block in human communication. While it is ubiquitous in our daily interactions within the real world, it is difficult to precisely interpret a virtual agent's pointing direction to the physical world, considering its complex and subtle gesture cues, such as the movements of the human hand and head. Fish Tank Virtual Reality (FTVR) display has the potential to provide accurate pointing cues as it creates a compelling 3D spatial effect by rendering perspective-corrected vision. In this paper, we conducted a study with pointing cues of three levels (Head-only, Hand-only, and Hand+Head) to evaluate how the head and hand gesture cues affect observers' performance in interpretation of where a virtual agent is pointing in a spherical FTVR display. The results showed that the hand gesture significantly helps people interpret the pointing both accurately and quickly for fine pointing (15°), with 19.4% higher accuracy and 1.42 seconds faster than the head cue. The combination of the head and hand yielded a small improvement on the accuracy (4.4%) with even slightly longer time (0.38 seconds) compared to the hand-only cue. However, for coarse pointing (30°), head cue appears to be sufficient with the accuracy of 90.2%. The result of this study provides guidelines on cues selection for designing pointing in the virtual environment.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Pointing is a fundamental building block in human communication. While it is ubiquitous in our daily interactions within the real world, it is difficult to precisely interpret a virtual agent's pointing direction to the physical world, considering its complex and subtle gesture cues, such as the movements of the human hand and head. 
Fish Tank Virtual Reality (FTVR) display has the potential to provide accurate pointing cues as it creates a compelling 3D spatial effect by rendering perspective-corrected vision. In this paper, we conducted a study with pointing cues of three levels (Head-only, Hand-only, and Hand+Head) to evaluate how the head and hand gesture cues affect observers' performance in interpretation of where a virtual agent is pointing in a spherical FTVR display. The results showed that the hand gesture significantly helps people interpret the pointing both accurately and quickly for fine pointing (15°), with 19.4% higher accuracy and 1.42 seconds faster than the head cue. The combination of the head and hand yielded a small improvement on the accuracy (4.4%) with even slightly longer time (0.38 seconds) compared to the hand-only cue. However, for coarse pointing (30°), head cue appears to be sufficient with the accuracy of 90.2%. The result of this study provides guidelines on cues selection for designing pointing in the virtual environment.", "fno": "08798063", "keywords": [ "Computer Displays", "Gesture Recognition", "Rendering Computer Graphics", "Virtual Reality", "Visual Perception", "Accurate Pointing Cues", "Hand Gesture Cues", "Virtual Agent", "Spherical FTVR Display", "Head Cue", "Hand Only Cue", "Coarse Pointing", "Cues Selection", "Virtual Environment", "Spherical Fish Tank Virtual Reality Display", "Fundamental Building Block", "Human Communication", "Complex Gesture Cues", "Subtle Gesture Cues", "3 D Spatial Effect", "Three Dimensional Displays", "Task Analysis", "Virtual Environments", "Fish", "Rendering Computer Graphics", "Observers", "Human Centered Computing", "Human Computer Interaction", "Interaction Design", "Empirical Studies In Interaction Design" ], "authors": [ { "affiliation": "University of British Columbia, BC, Canada", "fullName": "Fan Wu", "givenName": "Fan", "surname": "Wu", "__typename": "ArticleAuthorType" }, { "affiliation": "University of British 
Columbia, BC, Canada", "fullName": "Qian Zhou", "givenName": "Qian", "surname": "Zhou", "__typename": "ArticleAuthorType" }, { "affiliation": "University of British Columbia, BC, Canada", "fullName": "Kyoungwon Seo", "givenName": "Kyoungwon", "surname": "Seo", "__typename": "ArticleAuthorType" }, { "affiliation": "Complex Information Science Field, Future University Hakodate, Hakodate, Japan", "fullName": "Toshiro Kashiwagi", "givenName": "Toshiro", "surname": "Kashiwagi", "__typename": "ArticleAuthorType" }, { "affiliation": "University of British Columbia, BC, Canada", "fullName": "Sidney Fels", "givenName": "Sidney", "surname": "Fels", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-03-01T00:00:00", "pubType": "proceedings", "pages": "1237-1238", "year": "2019", "issn": null, "isbn": "978-1-7281-1377-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08798318", "articleId": "1cJ0P8vBhhm", "__typename": "AdjacentArticleType" }, "next": { "fno": "08797984", "articleId": "1cJ1gLtY5qg", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2000/0478/0/04780091", "title": "Enhancing Fish Tank VR", "doi": null, "abstractUrl": "/proceedings-article/vr/2000/04780091/12OmNAY79hZ", "parentPublication": { "id": "proceedings/vr/2000/0478/0", "title": "Virtual Reality Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/robot/1991/2163/0/00131972", "title": "Interaction of visual depth cues and viewing parameters during simulated telemanipulation", "doi": null, "abstractUrl": "/proceedings-article/robot/1991/00131972/12OmNBDyAa3", "parentPublication": { "id": "proceedings/robot/1991/2163/0", "title": "Proceedings. 
1991 IEEE International Conference on Robotics and Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892376", "title": "3DPS: An auto-calibrated three-dimensional perspective-corrected spherical display", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892376/12OmNC2OSOD", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csci/2015/9795/0/9795a849", "title": "Pointing Gesture Recognition Using Robot Head Control", "doi": null, "abstractUrl": "/proceedings-article/csci/2015/9795a849/12OmNz4SOqV", "parentPublication": { "id": "proceedings/csci/2015/9795/0", "title": "2015 International Conference on Computational Science and Computational Intelligence (CSCI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08642347", "title": "An Evaluation of Depth and Size Perception on a Spherical Fish Tank Virtual Reality Display", "doi": null, "abstractUrl": "/journal/tg/2019/05/08642347/17PYEjbrJk7", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2022/9617/0/961700a719", "title": "Spatial Updating in Virtual Reality – Auditory and Visual Cues in a Cave Automatic Virtual Environment", "doi": null, "abstractUrl": "/proceedings-article/vr/2022/961700a719/1CJch0MXduw", "parentPublication": { "id": "proceedings/vr/2022/9617/0", "title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797975", "title": "Do Head-Mounted Display Stereo Deficiencies Affect 3D Pointing Tasks in AR and VR?", 
"doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797975/1cJ0L4GQvSM", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797905", "title": "Investigating Spherical Fish Tank Virtual Reality Displays for Establishing Realistic Eye-Contact", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797905/1cJ0PcNhP1K", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798362", "title": "Match the Cube: Investigation of the Head-coupled Input with a Spherical Fish Tank Virtual Reality Display", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798362/1cJ16r0nRSM", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800a050", "title": "The Effects of Virtual Avatar Visibility on Pointing Interpretation by Observers in 3D Environments", "doi": null, "abstractUrl": "/proceedings-article/ismar/2021/015800a050/1yeDa4aaGY0", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNApcuag", "title": "IEEE Haptics Symposium 2008", "acronym": "haptics", "groupId": "1000312", "volume": "0", "displayVolume": "0", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNAndiu9", "doi": "10.1109/HAPTICS.2008.4479927", "title": "Force Amplitude Perception in Six Orthogonal Directions", "normalizedTitle": "Force Amplitude Perception in Six Orthogonal Directions", "abstract": "This paper presents three psychophysical experiments that attempt to determine whether human perception of force amplitude is isotropic in a virtual environment (VE). Participants employed passive or active forces in the same or different directions. Our results indicate that, regardless of whether stimulus presentation and response are active or passive, human perception of force amplitude is in fact anisotropic. Implications of these results for design of VR systems are briefly discussed.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents three psychophysical experiments that attempt to determine whether human perception of force amplitude is isotropic in a virtual environment (VE). Participants employed passive or active forces in the same or different directions. Our results indicate that, regardless of whether stimulus presentation and response are active or passive, human perception of force amplitude is in fact anisotropic. Implications of these results for design of VR systems are briefly discussed.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents three psychophysical experiments that attempt to determine whether human perception of force amplitude is isotropic in a virtual environment (VE). Participants employed passive or active forces in the same or different directions. Our results indicate that, regardless of whether stimulus presentation and response are active or passive, human perception of force amplitude is in fact anisotropic. 
Implications of these results for design of VR systems are briefly discussed.", "fno": "04479927", "keywords": [ "Force Feedback", "Haptic Interfaces", "Virtual Reality", "Force Amplitude Perception", "Psychophysical Experiment", "Human Perception", "Virtual Environment", "Passive Forces", "Active Forces", "Humans", "Haptic Interfaces", "Force Feedback", "Virtual Reality", "Virtual Environment", "Surgery", "Psychology", "Muscles", "Anisotropic Magnetoresistance", "Computer Displays", "Force Amplitude Perception", "Active And Passive Touch", "Anisotropic", "Difference Threshold", "Orthogonal Directions", "H 1 2 User Machine Systems Human Factors Human Information Processing", "H 5 2 User Interfaces Haptic I O Ergonomics" ], "authors": [ { "affiliation": "Envision Center for Data Perceptualization, Purdue University, 128 Memorial Mall, West Lafayette, IN 47906, USA, e-mail: edorjgo@purdue.edu", "fullName": "Enkhtuvshin Dorjgotov", "givenName": "Enkhtuvshin", "surname": "Dorjgotov", "__typename": "ArticleAuthorType" }, { "affiliation": "Envision Center for Data Perceptualization, Purdue University, 128 Memorial Mall, West Lafayette, IN 47906, USA, e-mail: bertoline@purdue.edu", "fullName": "Gary R. 
Bertoline", "givenName": "Gary R.", "surname": "Bertoline", "__typename": "ArticleAuthorType" }, { "affiliation": "Envision Center for Data Perceptualization, Purdue University, 128 Memorial Mall, West Lafayette, IN 47906, USA, e-mail: larns@purdue.edu", "fullName": "Laura Arns", "givenName": "Laura", "surname": "Arns", "__typename": "ArticleAuthorType" }, { "affiliation": "Envision Center for Data Perceptualization, Purdue University, 128 Memorial Mall, West Lafayette, IN 47906, USA, e-mail: pizlo@psych.purdue.edu", "fullName": "Zygmunt Pizlo", "givenName": "Zygmunt", "surname": "Pizlo", "__typename": "ArticleAuthorType" }, { "affiliation": "Envision Center for Data Perceptualization, Purdue University, 128 Memorial Mall, West Lafayette, IN 47906, USA, e-mail: dunlops@purdue.edu", "fullName": "Steven R. Dunlop", "givenName": "Steven R.", "surname": "Dunlop", "__typename": "ArticleAuthorType" } ], "idPrefix": "haptics", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-03-01T00:00:00", "pubType": "proceedings", "pages": "121-127", "year": "2008", "issn": "2324-7347", "isbn": "978-1-4244-2005-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "04479922", "articleId": "12OmNx3ZjdV", "__typename": "AdjacentArticleType" }, "next": { "fno": "04479913", "articleId": "12OmNAolH2A", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/gcis/2013/2885/0/06805930", "title": "Research on Active Suspension Control Strategy Based on the Model with Parameters of Hydraulic System", "doi": null, "abstractUrl": "/proceedings-article/gcis/2013/06805930/12OmNAolGPo", "parentPublication": { "id": "proceedings/gcis/2013/2885/0", "title": "2013 Fourth Global Congress on Intelligent Systems (GCIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/iswc/2006/0597/0/04067736", "title": "Palm-sized Attraction Force Display Exploiting the Nonlinearity of Perception", "doi": null, "abstractUrl": "/proceedings-article/iswc/2006/04067736/12OmNB1wkFI", "parentPublication": { "id": "proceedings/iswc/2006/0597/0", "title": "2006 10th IEEE International Symposium on Wearable Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2001/0948/0/09480007", "title": "Passive Force Display Using ER Brakes and its Control Experiments", "doi": null, "abstractUrl": "/proceedings-article/vr/2001/09480007/12OmNCcbEgU", "parentPublication": { "id": "proceedings/vr/2001/0948/0", "title": "Virtual Reality Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/whc/2007/2738/0/27380342", "title": "Tilt Perception by Constant Tactile and Constant Proprioceptive Feedback through a Human System Interface", "doi": null, "abstractUrl": "/proceedings-article/whc/2007/27380342/12OmNro0IcJ", "parentPublication": { "id": "proceedings/whc/2007/2738/0", "title": "2007 2nd Joint EuroHaptics Conference and Symposium on Haptic Interfaces for Virtual Environments and Teleoperator Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/haptics/2003/1890/0/18900430", "title": "Observations on and Modifications to the Rutgers Master to Support a Mixture of Passive Haptics and Active Force Feedback", "doi": null, "abstractUrl": "/proceedings-article/haptics/2003/18900430/12OmNyL0ThQ", "parentPublication": { "id": "proceedings/haptics/2003/1890/0", "title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2016/04/07506075", "title": "Active Manual Movement Improves Directional Perception of Illusory Force", "doi": null, 
"abstractUrl": "/journal/th/2016/04/07506075/13rRUwkxc5w", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08642384", "title": "Orientation Perception in Real and Virtual Environments", "doi": null, "abstractUrl": "/journal/tg/2019/05/08642384/17PYEkASbnU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797962", "title": "Muscleblazer: Force-Feedback Suit for Immersive Experience", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797962/1cJ0Qo05MTm", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797826", "title": "Virtual Objects Look Farther on the Sides: The Anisotropy of Distance Perception in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797826/1cJ18Y9D9Di", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwMXnv0", "title": "2014 IEEE Virtual Reality (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNBpmDG4", "doi": "10.1109/VR.2014.6802054", "title": "Time perception during walking in virtual environments", "normalizedTitle": "Time perception during walking in virtual environments", "abstract": "A large body of literature has analyzed differences between perception in the real world and virtual environments (VE) in terms of space, distance and speed perception. So far, no empirical data has been collected for time misperception in immersive VEs to our knowledge. However, there is evidence that time perception can deviate from veridical judgments, for instance, due to visual or auditive stimulation related to motion misperception. In this work we evaluate time perception during walking motions with a pilot study in an immersive head-mounted display (HMD) environment. Significant differences between time judgments in the real and virtual environment could not be observed.", "abstracts": [ { "abstractType": "Regular", "content": "A large body of literature has analyzed differences between perception in the real world and virtual environments (VE) in terms of space, distance and speed perception. So far, no empirical data has been collected for time misperception in immersive VEs to our knowledge. However, there is evidence that time perception can deviate from veridical judgments, for instance, due to visual or auditive stimulation related to motion misperception. In this work we evaluate time perception during walking motions with a pilot study in an immersive head-mounted display (HMD) environment. 
Significant differences between time judgments in the real and virtual environment could not be observed.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A large body of literature has analyzed differences between perception in the real world and virtual environments (VE) in terms of space, distance and speed perception. So far, no empirical data has been collected for time misperception in immersive VEs to our knowledge. However, there is evidence that time perception can deviate from veridical judgments, for instance, due to visual or auditive stimulation related to motion misperception. In this work we evaluate time perception during walking motions with a pilot study in an immersive head-mounted display (HMD) environment. Significant differences between time judgments in the real and virtual environment could not be observed.", "fno": "06802054", "keywords": [ "Legged Locomotion", "Virtual Environments", "Estimation", "Psychology", "Time Measurement", "Layout", "H 5 1 Information Interfaces And Presentation Multimedia Information Systems Artificial Augmented And Virtual Realities" ], "authors": [ { "affiliation": "Immersive Media Group (IMG), Department of Computer Science, University of Würzburg", "fullName": "Gerd Bruder", "givenName": "Gerd", "surname": "Bruder", "__typename": "ArticleAuthorType" }, { "affiliation": "Immersive Media Group (IMG), Department of Computer Science, University of Würzburg", "fullName": "Frank Steinicke", "givenName": "Frank", "surname": "Steinicke", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-03-01T00:00:00", "pubType": "proceedings", "pages": "67-68", "year": "2014", "issn": null, "isbn": "978-1-4799-2871-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06802053", "articleId": "12OmNCbU2Wt", "__typename": 
"AdjacentArticleType" }, "next": { "fno": "06802055", "articleId": "12OmNyRg4FC", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2007/0905/0/04161000", "title": "Elucidating Factors that can Facilitate Veridical Spatial Perception in Immersive Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2007/04161000/12OmNAtaRZ9", "parentPublication": { "id": "proceedings/vr/2007/0905/0", "title": "2007 IEEE Virtual Reality Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2014/2871/0/06802053", "title": "An enhanced steering algorithm for redirected walking in virtual environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2014/06802053/12OmNCbU2Wt", "parentPublication": { "id": "proceedings/vr/2014/2871/0", "title": "2014 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892353", "title": "On exploring the mitigation of distance misperception in virtual reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892353/12OmNqIhFRN", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2006/0224/0/02240003", "title": "Distance Perception in Immersive Virtual Environments, Revisited", "doi": null, "abstractUrl": "/proceedings-article/vr/2006/02240003/12OmNvm6VKz", "parentPublication": { "id": "proceedings/vr/2006/0224/0", "title": "IEEE Virtual Reality Conference (VR 2006)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2013/6097/0/06550194", "title": "Flexible spaces: Dynamic layout generation for infinite walking in virtual environments", "doi": null, "abstractUrl": 
"/proceedings-article/3dui/2013/06550194/12OmNyFU75b", "parentPublication": { "id": "proceedings/3dui/2013/6097/0", "title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2009/3943/0/04811007", "title": "Measurement Protocols for Medium-Field Distance Perception in Large-Screen Immersive Displays", "doi": null, "abstractUrl": "/proceedings-article/vr/2009/04811007/12OmNyeWdKg", "parentPublication": { "id": "proceedings/vr/2009/3943/0", "title": "2009 IEEE Virtual Reality Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504709", "title": "The effect of multi-sensory cues on performance and experience during walking in immersive virtual environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504709/12OmNyrqzC0", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08642384", "title": "Orientation Perception in Real and Virtual Environments", "doi": null, "abstractUrl": "/journal/tg/2019/05/08642384/17PYEkASbnU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798209", "title": "Enactive Approach to Assess Perceived Speed Error during Walking and Running in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798209/1cI6auzeLYY", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090453", "title": "Perception of 
Walking Self-body Avatar Enhances Virtual-walking Sensation", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090453/1jIxoojmMy4", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzVXNJh", "title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)", "acronym": "3dui", "groupId": "1001623", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNy49sEA", "doi": "10.1109/3DUI.2015.7131756", "title": "Distance perception during cooperative virtual locomotion", "normalizedTitle": "Distance perception during cooperative virtual locomotion", "abstract": "Virtual distances are often misperceived, though most past research ignores co-located cooperative systems. Because active locomotion plays a role in spatial perception, cooperative viewpoint control may impact perceived distances. Additionally, the center of projection is generally optimized for a single tracked user, meaning that a single action will result in different visual feedback for each user. We describe a study investigating the effect of a co-located cooperative locomotion interface on virtual distance perception. Results indicate that a slight center-of-projection offset did affect distance estimates for the untracked user, but that the cooperation actions themselves did not play a role. This study brings new insights to designing interfaces which facilitate accurate spatial perception in cooperative applications.", "abstracts": [ { "abstractType": "Regular", "content": "Virtual distances are often misperceived, though most past research ignores co-located cooperative systems. Because active locomotion plays a role in spatial perception, cooperative viewpoint control may impact perceived distances. Additionally, the center of projection is generally optimized for a single tracked user, meaning that a single action will result in different visual feedback for each user. We describe a study investigating the effect of a co-located cooperative locomotion interface on virtual distance perception. 
Results indicate that a slight center-of-projection offset did affect distance estimates for the untracked user, but that the cooperation actions themselves did not play a role. This study brings new insights to designing interfaces which facilitate accurate spatial perception in cooperative applications.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Virtual distances are often misperceived, though most past research ignores co-located cooperative systems. Because active locomotion plays a role in spatial perception, cooperative viewpoint control may impact perceived distances. Additionally, the center of projection is generally optimized for a single tracked user, meaning that a single action will result in different visual feedback for each user. We describe a study investigating the effect of a co-located cooperative locomotion interface on virtual distance perception. Results indicate that a slight center-of-projection offset did affect distance estimates for the untracked user, but that the cooperation actions themselves did not play a role. This study brings new insights to designing interfaces which facilitate accurate spatial perception in cooperative applications.", "fno": "07131756", "keywords": [ "Legged Locomotion", "Visualization", "Head", "Target Tracking", "Atmospheric Measurements", "Particle Measurements" ], "authors": [ { "affiliation": "Arts et Métiers ParisTech - CNRS Le2i, Institut Image, France", "fullName": "William E. 
Marsh", "givenName": "William E.", "surname": "Marsh", "__typename": "ArticleAuthorType" }, { "affiliation": "Arts et Métiers ParisTech - CNRS Le2i, Institut Image, France", "fullName": "Jean-Remy Chardonnet", "givenName": "Jean-Remy", "surname": "Chardonnet", "__typename": "ArticleAuthorType" }, { "affiliation": "Arts et Métiers ParisTech - CNRS Le2i, Institut Image, France", "fullName": "Frederic Merienne", "givenName": "Frederic", "surname": "Merienne", "__typename": "ArticleAuthorType" } ], "idPrefix": "3dui", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-03-01T00:00:00", "pubType": "proceedings", "pages": "173-174", "year": "2015", "issn": null, "isbn": "978-1-4673-6886-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07131755", "articleId": "12OmNwAKCNT", "__typename": "AdjacentArticleType" }, "next": { "fno": "07131757", "articleId": "12OmNBpVQdv", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/3dui/2013/6097/0/06550193", "title": "Tapping-In-Place: Increasing the naturalness of immersive walking-in-place locomotion through novel gestural input", "doi": null, "abstractUrl": "/proceedings-article/3dui/2013/06550193/12OmNAnMuyq", "parentPublication": { "id": "proceedings/3dui/2013/6097/0", "title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2015/6886/0/07131732", "title": "Carryover effects of calibration to visual and proprioceptive information on near field distance judgments in 3D user interaction", "doi": null, "abstractUrl": "/proceedings-article/3dui/2015/07131732/12OmNBr4exE", "parentPublication": { "id": "proceedings/3dui/2015/6886/0", "title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/svr/2015/7204/0/7204a033", "title": "Investigating the Distance Compression on Virtual Environments by Comparing Visualization Devices", "doi": null, "abstractUrl": "/proceedings-article/svr/2015/7204a033/12OmNzUxOco", "parentPublication": { "id": "proceedings/svr/2015/7204/0", "title": "2015 XVII Symposium on Virtual and Augmented Reality (SVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446235", "title": "Influences on the Elicitation of Interpersonal Space with Virtual Humans", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446235/13bd1eW2l9F", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/07/07946183", "title": "Walking with Virtual People: Evaluation of Locomotion Interfaces in Dynamic Environments", "doi": null, "abstractUrl": "/journal/tg/2018/07/07946183/13rRUEgs2C2", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/04/ttg201404588", "title": "Recalibration of Perceived Distance in Virtual Environments Occurs Rapidly and Transfers Asymmetrically Across Scale", "doi": null, "abstractUrl": "/journal/tg/2014/04/ttg201404588/13rRUyuegh9", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09744001", "title": "Influence of user posture and virtual exercise on impression of locomotion during VR observation", "doi": null, "abstractUrl": "/journal/tg/5555/01/09744001/1C8BFV420lq", 
"parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/01/08762207", "title": "Locomotion in Place in Virtual Reality: A Comparative Evaluation of Joystick, Teleport, and Leaning", "doi": null, "abstractUrl": "/journal/tg/2021/01/08762207/1bIeI0S82Aw", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089654", "title": "Effects of Locomotion Style and Body Visibility of a Telepresence Avatar", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089654/1jIxd00PzX2", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/12/09495106", "title": "A Wheelchair Locomotion Interface in a VR Disability Simulation Reduces Implicit Bias", "doi": null, "abstractUrl": "/journal/tg/2022/12/09495106/1vyjtwGIZkQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNASrawz", "title": "2009 IEEE Virtual Reality Conference", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNyeWdKg", "doi": "10.1109/VR.2009.4811007", "title": "Measurement Protocols for Medium-Field Distance Perception in Large-Screen Immersive Displays", "normalizedTitle": "Measurement Protocols for Medium-Field Distance Perception in Large-Screen Immersive Displays", "abstract": "How do users of virtual environments perceive virtual space? Many experiments have explored this question, but most of these have used head-mounted immersive displays. This paper reports an experiment that studied large-screen immersive displays at medium-field distances of 2 to 15 meters. The experiment measured ego-centric depth judgments in a CAVE, a tiled display wall, and a real-world outdoor field as a control condition. We carefully modeled the outdoor field to make the three environments as similar as possible. Measuring egocentric depth judgments in large-screen immersive displays requires adapting new measurement protocols; the experiment used timed imagined walking, verbal estimation, and triangulated blind walking. We found that depth judgments from timed imagined walking and verbal estimation were very similar in all three environments. However, triangulated blind walking was accurate only in the out-door field; in the large-screen immersive displays it showed under-estimation effects that were likely caused by insufficient physical space to perform the technique. These results suggest using timed imagined walking as a primary protocol for assessing depth perception in large-screen immersive displays. 
We also found that depth judgments in the CAVE were more accurate than in the tiled display wall, which suggests that the peripheral scenery offered by the CAVE is helpful when perceiving virtual space.", "abstracts": [ { "abstractType": "Regular", "content": "How do users of virtual environments perceive virtual space? Many experiments have explored this question, but most of these have used head-mounted immersive displays. This paper reports an experiment that studied large-screen immersive displays at medium-field distances of 2 to 15 meters. The experiment measured ego-centric depth judgments in a CAVE, a tiled display wall, and a real-world outdoor field as a control condition. We carefully modeled the outdoor field to make the three environments as similar as possible. Measuring egocentric depth judgments in large-screen immersive displays requires adapting new measurement protocols; the experiment used timed imagined walking, verbal estimation, and triangulated blind walking. We found that depth judgments from timed imagined walking and verbal estimation were very similar in all three environments. However, triangulated blind walking was accurate only in the out-door field; in the large-screen immersive displays it showed under-estimation effects that were likely caused by insufficient physical space to perform the technique. These results suggest using timed imagined walking as a primary protocol for assessing depth perception in large-screen immersive displays. We also found that depth judgments in the CAVE were more accurate than in the tiled display wall, which suggests that the peripheral scenery offered by the CAVE is helpful when perceiving virtual space.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "How do users of virtual environments perceive virtual space? Many experiments have explored this question, but most of these have used head-mounted immersive displays. 
This paper reports an experiment that studied large-screen immersive displays at medium-field distances of 2 to 15 meters. The experiment measured ego-centric depth judgments in a CAVE, a tiled display wall, and a real-world outdoor field as a control condition. We carefully modeled the outdoor field to make the three environments as similar as possible. Measuring egocentric depth judgments in large-screen immersive displays requires adapting new measurement protocols; the experiment used timed imagined walking, verbal estimation, and triangulated blind walking. We found that depth judgments from timed imagined walking and verbal estimation were very similar in all three environments. However, triangulated blind walking was accurate only in the out-door field; in the large-screen immersive displays it showed under-estimation effects that were likely caused by insufficient physical space to perform the technique. These results suggest using timed imagined walking as a primary protocol for assessing depth perception in large-screen immersive displays. 
We also found that depth judgments in the CAVE were more accurate than in the tiled display wall, which suggests that the peripheral scenery offered by the CAVE is helpful when perceiving virtual space.", "fno": "04811007", "keywords": [ "Helmet Mounted Displays", "User Interfaces", "Virtual Reality", "Measurement Protocols", "Medium Field Distance Perception", "Large Screen Immersive Displays", "Virtual Environments", "Egocentric Depth Judgments", "Verbal Estimation", "Triangulated Blind Walking", "Protocols", "Large Screen Displays", "Legged Locomotion", "Virtual Environment", "Layout", "Particle Measurements", "Observers", "Face Detection", "Calibration", "Area Measurement", "Distance Perception", "Egocentric Depth Perception", "Virtual Environments", "Large Screen Immersive Displays", "I 2 10 Artifical Intelligence Vision And Scene Understanding Perceptual Reasoning", "H 5 2 Information Interfaces And Presentation User Interfaces Ergonomics" ], "authors": [ { "affiliation": "NVIDIA Corporation, University of California e-mail: eklein@nvidia.com", "fullName": "Eric Klein", "givenName": "Eric", "surname": "Klein", "__typename": "ArticleAuthorType" }, { "affiliation": "Mississippi State University e-mail: swan@acm.org.", "fullName": "J. Edward Swan II", "givenName": "J. Edward", "surname": "Swan", "__typename": "ArticleAuthorType" }, { "affiliation": "SPADAC Inc. Naval Research Laboratory, e-mail: gsschmidt1@yahoo.com", "fullName": "Gregory S. Schmidt", "givenName": "Gregory S.", "surname": "Schmidt", "__typename": "ArticleAuthorType" }, { "affiliation": "Naval Research Laboratory e-mail: markl@itd.nrl.navy.mil.", "fullName": "Mark A. Livingston", "givenName": "Mark A.", "surname": "Livingston", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Rostock, University of California, e-mail: staadt@acm.org.", "fullName": "Oliver G. 
Staadt", "givenName": "Oliver G.", "surname": "Staadt", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-03-01T00:00:00", "pubType": "proceedings", "pages": "107-113", "year": "2009", "issn": "1087-8270", "isbn": "978-1-4244-3943-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "04811005", "articleId": "12OmNARRYxy", "__typename": "AdjacentArticleType" }, "next": { "fno": "04811009", "articleId": "12OmNB8CiYX", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2009/3943/0/04811009", "title": "Quantification of Contrast Sensitivity and Color Perception using Head-worn Augmented Reality Displays", "doi": null, "abstractUrl": "/proceedings-article/vr/2009/04811009/12OmNB8CiYX", "parentPublication": { "id": "proceedings/vr/2009/3943/0", "title": "2009 IEEE Virtual Reality Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2014/2871/0/06802054", "title": "Time perception during walking in virtual environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2014/06802054/12OmNBpmDG4", "parentPublication": { "id": "proceedings/vr/2014/2871/0", "title": "2014 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892292", "title": "Corrective feedback for depth perception in CAVE-like systems", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892292/12OmNrNh0Ml", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2010/6846/0/05444717", "title": "The implementation of a novel 
walking interface within an immersive display", "doi": null, "abstractUrl": "/proceedings-article/3dui/2010/05444717/12OmNx19k1n", "parentPublication": { "id": "proceedings/3dui/2010/6846/0", "title": "2010 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2013/4795/0/06549392", "title": "Unintended positional drift and its potential solutions", "doi": null, "abstractUrl": "/proceedings-article/vr/2013/06549392/12OmNxXCGFc", "parentPublication": { "id": "proceedings/vr/2013/4795/0", "title": "2013 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/grc/2011/0372/0/06122623", "title": "VISIE: A spatially immersive interaction environment using real-time human measurement", "doi": null, "abstractUrl": "/proceedings-article/grc/2011/06122623/12OmNyen1sx", "parentPublication": { "id": "proceedings/grc/2011/0372/0", "title": "2011 IEEE International Conference on Granular Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446539", "title": "Investigating the Effects of Anthropomorphic Fidelity of Self-Avatars on Near Field Depth Perception in Immersive Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446539/13bd1h03qOe", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2018/02/mcg2018020044", "title": "15 Years of Research on Redirected Walking in Immersive Virtual Environments", "doi": null, "abstractUrl": "/magazine/cg/2018/02/mcg2018020044/13rRUxcsYOr", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "trans/tg/2019/05/08642384", "title": "Orientation Perception in Real and Virtual Environments", "doi": null, "abstractUrl": "/journal/tg/2019/05/08642384/17PYEkASbnU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09199575", "title": "Eyes-free Target Acquisition During Walking in Immersive Mixed Reality", "doi": null, "abstractUrl": "/journal/tg/2020/12/09199575/1ncgpmtzdn2", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBC8AAD", "title": "2010 IEEE Virtual Reality Conference (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2010", "__typename": "ProceedingType" }, "article": { "id": "12OmNyoAA64", "doi": "10.1109/VR.2010.5444791", "title": "Influence of tactile feedback and presence on egocentric distance perception in virtual environments", "normalizedTitle": "Influence of tactile feedback and presence on egocentric distance perception in virtual environments", "abstract": "A number of studies have reported that distance judgments are underestimated in virtual environments (VE) when compared to those made in the real world. Studies have also reported that providing users with visual feedback in the VE improves their distance perception and made them feel more immersed in the virtual world. In this study, we investigated the effect of tactile feedback and visual manipulation of the VE on egocentric distance perception. In contrast to previous studies which have focused on task-specific and error-corrective feedback (for example, providing knowledge about the errors in distance estimations), we demonstrate that exploratory feedback is sufficient for reducing errors in distance estimation. In Experiment 1, the effects of different types of feedback (visual, tactile and visual plus tactile) on distance judgments were studied. Tactile feedback was given to participants as they explored and touched objects in a VE. Results showed that distance judgments improved in the VE regardless of the type of sensory feedback provided. In Experiment 2, we presented a real world environment to the participants and then situated them in a VE that was either a replica or an altered representation of the real world environment. Results showed that participants made significant underestimation in their distance judgments when the VE was not a replica of the physical space. 
We further found that providing both visual and tactile feedback did not reduce distance compression in such a situation. These results are discussed in the light of the nature of feedback provided and how assumptions about the VE may affect distance perception in virtual environments.", "abstracts": [ { "abstractType": "Regular", "content": "A number of studies have reported that distance judgments are underestimated in virtual environments (VE) when compared to those made in the real world. Studies have also reported that providing users with visual feedback in the VE improves their distance perception and made them feel more immersed in the virtual world. In this study, we investigated the effect of tactile feedback and visual manipulation of the VE on egocentric distance perception. In contrast to previous studies which have focused on task-specific and error-corrective feedback (for example, providing knowledge about the errors in distance estimations), we demonstrate that exploratory feedback is sufficient for reducing errors in distance estimation. In Experiment 1, the effects of different types of feedback (visual, tactile and visual plus tactile) on distance judgments were studied. Tactile feedback was given to participants as they explored and touched objects in a VE. Results showed that distance judgments improved in the VE regardless of the type of sensory feedback provided. In Experiment 2, we presented a real world environment to the participants and then situated them in a VE that was either a replica or an altered representation of the real world environment. Results showed that participants made significant underestimation in their distance judgments when the VE was not a replica of the physical space. We further found that providing both visual and tactile feedback did not reduce distance compression in such a situation. 
These results are discussed in the light of the nature of feedback provided and how assumptions about the VE may affect distance perception in virtual environments.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A number of studies have reported that distance judgments are underestimated in virtual environments (VE) when compared to those made in the real world. Studies have also reported that providing users with visual feedback in the VE improves their distance perception and made them feel more immersed in the virtual world. In this study, we investigated the effect of tactile feedback and visual manipulation of the VE on egocentric distance perception. In contrast to previous studies which have focused on task-specific and error-corrective feedback (for example, providing knowledge about the errors in distance estimations), we demonstrate that exploratory feedback is sufficient for reducing errors in distance estimation. In Experiment 1, the effects of different types of feedback (visual, tactile and visual plus tactile) on distance judgments were studied. Tactile feedback was given to participants as they explored and touched objects in a VE. Results showed that distance judgments improved in the VE regardless of the type of sensory feedback provided. In Experiment 2, we presented a real world environment to the participants and then situated them in a VE that was either a replica or an altered representation of the real world environment. Results showed that participants made significant underestimation in their distance judgments when the VE was not a replica of the physical space. We further found that providing both visual and tactile feedback did not reduce distance compression in such a situation. 
These results are discussed in the light of the nature of feedback provided and how assumptions about the VE may affect distance perception in virtual environments.", "fno": "05444791", "keywords": [ "Error Corrective Feedback", "Tactile Feedback", "Egocentric Distance Perception", "Virtual Environments", "Visual Feedback", "Task Specific Feedback" ], "authors": [ { "affiliation": null, "fullName": "Farahnaz Ahmed", "givenName": "Farahnaz", "surname": "Ahmed", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Joseph D Cohen", "givenName": "Joseph D", "surname": "Cohen", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Katherine S Binder", "givenName": "Katherine S", "surname": "Binder", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Claude L Fennema", "givenName": "Claude L", "surname": "Fennema", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2010-03-01T00:00:00", "pubType": "proceedings", "pages": "195-202", "year": "2010", "issn": null, "isbn": "978-1-4244-6237-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05444790", "articleId": "12OmNBTs7wG", "__typename": "AdjacentArticleType" }, "next": { "fno": "05444788", "articleId": "12OmNx5YvmD", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/3dui/2007/0907/0/04142855", "title": "Tactile Feedback at the Finger Tips for Improved Direct Interaction in Immersive Environments", "doi": null, "abstractUrl": "/proceedings-article/3dui/2007/04142855/12OmNBWi6KF", "parentPublication": { "id": "proceedings/3dui/2007/0907/0", "title": "2007 IEEE Symposium on 3D User Interfaces", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/haptics/2002/1489/0/14890327", "title": "TextureExplorer: A Tactile and Force Display for Virtual Textures", "doi": null, "abstractUrl": "/proceedings-article/haptics/2002/14890327/12OmNqBbHwI", "parentPublication": { "id": "proceedings/haptics/2002/1489/0", "title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2002/1492/0/14920275", "title": "Perceived Egocentric Distances in Real, Image-Based, and Traditional Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2002/14920275/12OmNwHhoQ2", "parentPublication": { "id": "proceedings/vr/2002/1492/0", "title": "Proceedings IEEE Virtual Reality 2002", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/achi/2009/3529/0/3529a234", "title": "Reconfiguration of Vibro-tactile Feedback Based on Drivers' Sitting Attitude", "doi": null, "abstractUrl": "/proceedings-article/achi/2009/3529a234/12OmNyKa6bz", "parentPublication": { "id": "proceedings/achi/2009/3529/0", "title": "International Conference on Advances in Computer-Human Interaction", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/whc/2007/2738/0/04145163", "title": "Tactile Perception of Rotational Sliding", "doi": null, "abstractUrl": "/proceedings-article/whc/2007/04145163/12OmNzd7bxr", "parentPublication": { "id": "proceedings/whc/2007/2738/0", "title": "2007 2nd Joint EuroHaptics Conference and Symposium on Haptic Interfaces for Virtual Environments and Teleoperator Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iros/1995/7108/3/71083114", "title": "A tactile array sensor layered in an artificial skin", "doi": null, "abstractUrl": "/proceedings-article/iros/1995/71083114/12OmNzkuKE4", "parentPublication": { "id": 
"proceedings/iros/1995/7108/3", "title": "Intelligent Robots and Systems, IEEE/RSJ International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2016/01/07234937", "title": "Rich Pinch: Perception of Object Movement with Tactile Illusion", "doi": null, "abstractUrl": "/journal/th/2016/01/07234937/13rRUEgarnR", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2015/04/07080902", "title": "Tactile Feedback of Object Slip Facilitates Virtual Object Manipulation", "doi": null, "abstractUrl": "/journal/th/2015/04/07080902/13rRUNvyakX", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2009/02/tth2009020103", "title": "Tactile Feedback Induces Reduced Grasping Force in Robot-Assisted Surgery", "doi": null, "abstractUrl": "/journal/th/2009/02/tth2009020103/13rRUwInvla", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2013/02/tth2013020217", "title": "Integration of Force Reflection with Tactile Sensing for Minimally Invasive Robotics-Assisted Tumor Localization", "doi": null, "abstractUrl": "/journal/th/2013/02/tth2013020217/13rRUy0qnGs", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cI6akLvAuQ", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cJ18Y9D9Di", "doi": "10.1109/VR.2019.8797826", "title": "Virtual Objects Look Farther on the Sides: The Anisotropy of Distance Perception in Virtual Reality", "normalizedTitle": "Virtual Objects Look Farther on the Sides: The Anisotropy of Distance Perception in Virtual Reality", "abstract": "The topic of distance perception has been widely investigated in Virtual Reality (VR). However, the vast majority of previous work mainly focused on distance perception of objects placed in front of the observer. Then, what happens when the observer looks on the side? In this paper, we study differences in distance estimation when comparing objects placed in front of the observer with objects placed on his side. Through a series of four experiments (n=85), we assessed participants' distance estimation and ruled out potential biases. In particular, we considered the placement of visual stimuli in the field of view, users' exploration behavior as well as the presence of depth cues. For all experiments a two-alternative forced choice (2AFC) standardized psychophysical protocol was employed, in which the main task was to determine the stimuli that seemed to be the farthest one. In summary, our results showed that the orientation of virtual stimuli with respect to the user introduces a distance perception bias: objects placed on the sides are systematically perceived farther away than objects in front. In addition, we could observe that this bias increases along with the angle, and appears to be independent of both the position of the object in the field of view as well as the quality of the virtual scene. 
This work sheds a new light on one of the specificities of VR environments regarding the wider subject of visual space theory. Our study paves the way for future experiments evaluating the anisotropy of distance perception in real and virtual environments.", "abstracts": [ { "abstractType": "Regular", "content": "The topic of distance perception has been widely investigated in Virtual Reality (VR). However, the vast majority of previous work mainly focused on distance perception of objects placed in front of the observer. Then, what happens when the observer looks on the side? In this paper, we study differences in distance estimation when comparing objects placed in front of the observer with objects placed on his side. Through a series of four experiments (n=85), we assessed participants' distance estimation and ruled out potential biases. In particular, we considered the placement of visual stimuli in the field of view, users' exploration behavior as well as the presence of depth cues. For all experiments a two-alternative forced choice (2AFC) standardized psychophysical protocol was employed, in which the main task was to determine the stimuli that seemed to be the farthest one. In summary, our results showed that the orientation of virtual stimuli with respect to the user introduces a distance perception bias: objects placed on the sides are systematically perceived farther away than objects in front. In addition, we could observe that this bias increases along with the angle, and appears to be independent of both the position of the object in the field of view as well as the quality of the virtual scene. This work sheds a new light on one of the specificities of VR environments regarding the wider subject of visual space theory. 
Our study paves the way for future experiments evaluating the anisotropy of distance perception in real and virtual environments.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The topic of distance perception has been widely investigated in Virtual Reality (VR). However, the vast majority of previous work mainly focused on distance perception of objects placed in front of the observer. Then, what happens when the observer looks on the side? In this paper, we study differences in distance estimation when comparing objects placed in front of the observer with objects placed on his side. Through a series of four experiments (n=85), we assessed participants' distance estimation and ruled out potential biases. In particular, we considered the placement of visual stimuli in the field of view, users' exploration behavior as well as the presence of depth cues. For all experiments a two-alternative forced choice (2AFC) standardized psychophysical protocol was employed, in which the main task was to determine the stimuli that seemed to be the farthest one. In summary, our results showed that the orientation of virtual stimuli with respect to the user introduces a distance perception bias: objects placed on the sides are systematically perceived farther away than objects in front. In addition, we could observe that this bias increases along with the angle, and appears to be independent of both the position of the object in the field of view as well as the quality of the virtual scene. This work sheds a new light on one of the specificities of VR environments regarding the wider subject of visual space theory. 
Our study paves the way for future experiments evaluating the anisotropy of distance perception in real and virtual environments.", "fno": "08797826", "keywords": [ "Neurophysiology", "Psychology", "Virtual Reality", "Visual Perception", "Observer", "Virtual Stimuli", "Virtual Scene", "Virtual Environments", "Virtual Reality", "Distance Estimation", "Distance Perception", "Virtual Objects", "Two Alternative Forced Choice Standardized Psychophysical Protocol", "Visualization", "Resists", "Observers", "Calibration", "Anisotropic Magnetoresistance", "Virtual Reality", "Protocols", "Perception", "Distance", "Virtual Reality", "User Experiment", "Psychophysical Study", "Human Centered Computing X 2014 Human Computer Interaction HCI X 2014 Interaction Paradigms X 2014 Virtual Reality", "Human Centered Computing X 2014 Human Computer Interaction HCI X 2014 HCI Design And Evaluation Methods X 2014 User Studies" ], "authors": [ { "affiliation": "Ecole Centrale de Nantes, AAU, Inria Hybrid", "fullName": "Etienne Peillard", "givenName": "Etienne", "surname": "Peillard", "__typename": "ArticleAuthorType" }, { "affiliation": "IRISA, CNRS, Inria, Univ. Rennes", "fullName": "Thomas Thebaud", "givenName": "Thomas", "surname": "Thebaud", "__typename": "ArticleAuthorType" }, { "affiliation": "Ecole Centrale de Nantes, AAU, Inria Hybrid", "fullName": "Jean-Marie Normand", "givenName": "Jean-Marie", "surname": "Normand", "__typename": "ArticleAuthorType" }, { "affiliation": "IRISA, CNRS, Inria, Univ. Rennes", "fullName": "Ferran Argelaguet", "givenName": "Ferran", "surname": "Argelaguet", "__typename": "ArticleAuthorType" }, { "affiliation": "Ecole Centrale de Nantes, AAU, Inria Hybrid", "fullName": "Guillaume Moreau", "givenName": "Guillaume", "surname": "Moreau", "__typename": "ArticleAuthorType" }, { "affiliation": "IRISA, CNRS, Inria, Univ. 
Rennes", "fullName": "Anatole Lécuyer", "givenName": "Anatole", "surname": "Lécuyer", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-03-01T00:00:00", "pubType": "proceedings", "pages": "227-236", "year": "2019", "issn": null, "isbn": "978-1-7281-1377-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08797840", "articleId": "1cJ0XRudcWI", "__typename": "AdjacentArticleType" }, "next": { "fno": "08798095", "articleId": "1cJ0Yxz6rrG", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vrais/1993/4910/0/00378257", "title": "Auditory distance perception by translating observers", "doi": null, "abstractUrl": "/proceedings-article/vrais/1993/00378257/12OmNqBKTSU", "parentPublication": { "id": "proceedings/vrais/1993/4910/0", "title": "IEEE 1993 Symposium on Research Frontiers in Virtual Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2013/5048/0/5048a588", "title": "Using a Probabilistic Topic Model to Link Observers' Perception Tendency to Personality", "doi": null, "abstractUrl": "/proceedings-article/acii/2013/5048a588/12OmNqHqSlQ", "parentPublication": { "id": "proceedings/acii/2013/5048/0", "title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cybvis/1996/8058/0/00629459", "title": "What does the horizontal-vertical illusion show us about size perception?", "doi": null, "abstractUrl": "/proceedings-article/cybvis/1996/00629459/12OmNvSKNBX", "parentPublication": { "id": "proceedings/cybvis/1996/8058/0", "title": "Proceedings II Workshop on Cybernetic Vision", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2015/6886/0/07131756", "title": "Distance perception during cooperative virtual locomotion", "doi": null, "abstractUrl": "/proceedings-article/3dui/2015/07131756/12OmNy49sEA", "parentPublication": { "id": "proceedings/3dui/2015/6886/0", "title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/04/ttg2013040701", "title": "Peripheral Stimulation and its Effect on Perceived Spatial Scale in Virtual Environments", "doi": null, "abstractUrl": "/journal/tg/2013/04/ttg2013040701/13rRUx0xPmZ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/10/08424067", "title": "Perception of Looming Motion in Virtual Reality Egocentric Interception Tasks", "doi": null, "abstractUrl": "/journal/tg/2019/10/08424067/13rRUyY28YF", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08642384", "title": "Orientation Perception in Real and Virtual Environments", "doi": null, "abstractUrl": "/journal/tg/2019/05/08642384/17PYEkASbnU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2022/9617/0/961700a232", "title": "Empirical Evaluation of Calibration and Long-term Carryover Effects of Reverberation on Egocentric Auditory Depth Perception in VR", "doi": null, "abstractUrl": "/proceedings-article/vr/2022/961700a232/1CJbNHnU8o0", "parentPublication": { "id": "proceedings/vr/2022/9617/0", "title": "2022 IEEE on Conference 
Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090452", "title": "Detection Thresholds of Tactile Perception in Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090452/1jIxnAwbrSo", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2020/1485/0/09105956", "title": "Audio-Visual Perception of Omnidirectional Video for Virtual Reality Applications", "doi": null, "abstractUrl": "/proceedings-article/icmew/2020/09105956/1kwqGsedM7m", "parentPublication": { "id": "proceedings/icmew/2020/1485/0", "title": "2020 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1yfxDjRGMmc", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "1810084", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1yfxMXu7XhK", "doi": "10.1109/ISMAR-Adjunct54149.2021.00048", "title": "Manipulating Rotational Perception in Virtual Reality", "normalizedTitle": "Manipulating Rotational Perception in Virtual Reality", "abstract": "People get disoriented and detached from the real world when immersed in a virtual environment; this makes them lose track of rotation in the real world. This paper studies people’s ability to maintain perception of spatial orientation in the real environment while engaged in a virtual experience and explores how visual cues affect the results. Twelve participants performed targeting tasks with rotations, followed by pointing in a known direction to observe the error in perception of real world orientation. Error was measured in three VR environments: visual cues consistent with real world rotation; visual cues slowly changing to become inconsistent with real world; no rotational visual cues. We found that visual cues are essential for people to perceive real-world orientation and removing cues results in drastic disorientation. Moreover, altering visual cues deliberately can be used to control people’s orientation perception to disorientate people in the direction we desire; in our experiment participants did not notice this manipulation. 
Manipulation of the presentation of visual cues may allow designers to control, correct and manipulate people’s cognitive representation of their orientation and position not only in the virtual world, but also in the real world, be it for in-place redirection or \"redirected standing\", or corrective redirection for safety.", "abstracts": [ { "abstractType": "Regular", "content": "People get disoriented and detached from the real world when immersed in a virtual environment; this makes them lose track of rotation in the real world. This paper studies people’s ability to maintain perception of spatial orientation in the real environment while engaged in a virtual experience and explores how visual cues affect the results. Twelve participants performed targeting tasks with rotations, followed by pointing in a known direction to observe the error in perception of real world orientation. Error was measured in three VR environments: visual cues consistent with real world rotation; visual cues slowly changing to become inconsistent with real world; no rotational visual cues. We found that visual cues are essential for people to perceive real-world orientation and removing cues results in drastic disorientation. Moreover, altering visual cues deliberately can be used to control people’s orientation perception to disorientate people in the direction we desire; in our experiment participants did not notice this manipulation. Manipulation of the presentation of visual cues may allow designers to control, correct and manipulate people’s cognitive representation of their orientation and position not only in the virtual world, but also in the real world, be it for in-place redirection or \"redirected standing\", or corrective redirection for safety.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "People get disoriented and detached from the real world when immersed in a virtual environment; this makes them lose track of rotation in the real world. 
This paper studies people’s ability to maintain perception of spatial orientation in the real environment while engaged in a virtual experience and explores how visual cues affect the results. Twelve participants performed targeting tasks with rotations, followed by pointing in a known direction to observe the error in perception of real world orientation. Error was measured in three VR environments: visual cues consistent with real world rotation; visual cues slowly changing to become inconsistent with real world; no rotational visual cues. We found that visual cues are essential for people to perceive real-world orientation and removing cues results in drastic disorientation. Moreover, altering visual cues deliberately can be used to control people’s orientation perception to disorientate people in the direction we desire; in our experiment participants did not notice this manipulation. Manipulation of the presentation of visual cues may allow designers to control, correct and manipulate people’s cognitive representation of their orientation and position not only in the virtual world, but also in the real world, be it for in-place redirection or \"redirected standing\", or corrective redirection for safety.", "fno": "129800a201", "keywords": [ "Cognition", "Virtual Reality", "Visual Perception", "World Rotation", "Rotational Visual Cues", "Real World Orientation", "Removing Cues Results", "Orientation Perception", "Virtual World", "Manipulating Rotational Perception", "Virtual Environment", "Virtual Reality", "Legged Locomotion", "Visualization", "Target Tracking", "Measurement Uncertainty", "Virtual Environments", "Games", "Particle Measurements", "Virtual Reality", "Spatial Updating", "Visual Cues", "Inconsistency", "Disorientation", "Rotation" ], "authors": [ { "affiliation": "University of Nottingham,Mixed Reality Lab, Computer Science", "fullName": "Jude Afana", "givenName": "Jude", "surname": "Afana", "__typename": "ArticleAuthorType" }, { "affiliation": 
"University of Nottingham,Mixed Reality Lab, Computer Science", "fullName": "Joe Marshall", "givenName": "Joe", "surname": "Marshall", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Nottingham,Mixed Reality Lab, Computer Science", "fullName": "Paul Tennent", "givenName": "Paul", "surname": "Tennent", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-10-01T00:00:00", "pubType": "proceedings", "pages": "201-206", "year": "2021", "issn": null, "isbn": "978-1-6654-1298-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "129800a195", "articleId": "1yeQGgQscAU", "__typename": "AdjacentArticleType" }, "next": { "fno": "129800a207", "articleId": "1yfxNuG3Mju", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2014/2871/0/06802054", "title": "Time perception during walking in virtual environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2014/06802054/12OmNBpmDG4", "parentPublication": { "id": "proceedings/vr/2014/2871/0", "title": "2014 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2013/4795/0/06549382", "title": "Poster: Do walking motions enhance visually induced self-motion illusions in virtual reality?", "doi": null, "abstractUrl": "/proceedings-article/vr/2013/06549382/12OmNBr4eym", "parentPublication": { "id": "proceedings/vr/2013/4795/0", "title": "2013 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2017/2943/0/2943a111", "title": "Designing for Depth Perceptions in Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2017/2943a111/12OmNrMZpBd", 
"parentPublication": { "id": "proceedings/ismar/2017/2943/0", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2015/6886/0/07131756", "title": "Distance perception during cooperative virtual locomotion", "doi": null, "abstractUrl": "/proceedings-article/3dui/2015/07131756/12OmNy49sEA", "parentPublication": { "id": "proceedings/3dui/2015/6886/0", "title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504752", "title": "Disguising rotational gain for redirected walking in virtual reality: Effect of visual density", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504752/12OmNyr8YkS", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2015/1727/0/07223357", "title": "Towards context-sensitive reorientation for real walking in virtual reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223357/12OmNzE54AN", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/04/08260856", "title": "NotifiVR: Exploring Interruptions and Notifications in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2018/04/08260856/13rRUxNmPDW", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/07/07955099", "title": "Collision Avoidance Behavior between Walkers: Global and Local Motion Cues", "doi": null, "abstractUrl": 
"/journal/tg/2018/07/07955099/13rRUxcbnHk", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08642384", "title": "Orientation Perception in Real and Virtual Environments", "doi": null, "abstractUrl": "/journal/tg/2019/05/08642384/17PYEkASbnU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2022/9617/0/961700a719", "title": "Spatial Updating in Virtual Reality – Auditory and Visual Cues in a Cave Automatic Virtual Environment", "doi": null, "abstractUrl": "/proceedings-article/vr/2022/961700a719/1CJch0MXduw", "parentPublication": { "id": "proceedings/vr/2022/9617/0", "title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNz2TCuO", "title": "Virtual Reality Conference, IEEE", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2012", "__typename": "ProceedingType" }, "article": { "id": "12OmNwp74Br", "doi": "10.1109/VR.2012.6180875", "title": "Self-motion illusions (vection) in VR -- Are they good for anything?", "normalizedTitle": "Self-motion illusions (vection) in VR -- Are they good for anything?", "abstract": "When we locomote through real or virtual environments, self-to-object relationships constantly change. Nevertheless, in real environments we effortlessly maintain an ongoing awareness of roughly where we are with respect to our immediate surrounds, even in the absence of any direct perceptual support (e.g., in darkness or with eyes closed). In virtual environments, however, we tend to get lost far more easily. Why is that? Research suggests that physical motion cues are critical in facilitating this \"automatic spatial updating\" of the self-to-surround relationships during perspective changes. However, allowing for full physical motion in VR is costly and often unfeasible. Here, we demonstrated for the first time that the mere illusion of self-motion (\"circular vection\") can provide a similar benefit as actual self-motion: While blindfolded, participants were asked to imagine facing new perspectives in a well-learned room, and point to previously-learned objects. As expected, this task was difficult when participants could not physically rotate to the instructed perspective. Performance was significantly improved, however, when they perceived illusory self-rotation to the novel perspective (even though they did not physically move). This circular vection was induced by a combination of rotating sound fields (\"auditory vection\") and biomechanical vection from stepping along a carrousel-like rotating floor platter. 
In summary, illusory self-motion was shown to indeed facilitate perspective switches and thus spatial orientation. These findings have important implications for both our understanding of human spatial cognition and the design of more effective yet affordable VR simulators. In fact, it might ultimately enable us to relax the need for physical motion in VR by intelligently utilizing self-motion illusions.", "abstracts": [ { "abstractType": "Regular", "content": "When we locomote through real or virtual environments, self-to-object relationships constantly change. Nevertheless, in real environments we effortlessly maintain an ongoing awareness of roughly where we are with respect to our immediate surrounds, even in the absence of any direct perceptual support (e.g., in darkness or with eyes closed). In virtual environments, however, we tend to get lost far more easily. Why is that? Research suggests that physical motion cues are critical in facilitating this \"automatic spatial updating\" of the self-to-surround relationships during perspective changes. However, allowing for full physical motion in VR is costly and often unfeasible. Here, we demonstrated for the first time that the mere illusion of self-motion (\"circular vection\") can provide a similar benefit as actual self-motion: While blindfolded, participants were asked to imagine facing new perspectives in a well-learned room, and point to previously-learned objects. As expected, this task was difficult when participants could not physically rotate to the instructed perspective. Performance was significantly improved, however, when they perceived illusory self-rotation to the novel perspective (even though they did not physically move). This circular vection was induced by a combination of rotating sound fields (\"auditory vection\") and biomechanical vection from stepping along a carrousel-like rotating floor platter. 
In summary, illusory self-motion was shown to indeed facilitate perspective switches and thus spatial orientation. These findings have important implications for both our understanding of human spatial cognition and the design of more effective yet affordable VR simulators. In fact, it might ultimately enable us to relax the need for physical motion in VR by intelligently utilizing self-motion illusions.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "When we locomote through real or virtual environments, self-to-object relationships constantly change. Nevertheless, in real environments we effortlessly maintain an ongoing awareness of roughly where we are with respect to our immediate surrounds, even in the absence of any direct perceptual support (e.g., in darkness or with eyes closed). In virtual environments, however, we tend to get lost far more easily. Why is that? Research suggests that physical motion cues are critical in facilitating this \"automatic spatial updating\" of the self-to-surround relationships during perspective changes. However, allowing for full physical motion in VR is costly and often unfeasible. Here, we demonstrated for the first time that the mere illusion of self-motion (\"circular vection\") can provide a similar benefit as actual self-motion: While blindfolded, participants were asked to imagine facing new perspectives in a well-learned room, and point to previously-learned objects. As expected, this task was difficult when participants could not physically rotate to the instructed perspective. Performance was significantly improved, however, when they perceived illusory self-rotation to the novel perspective (even though they did not physically move). This circular vection was induced by a combination of rotating sound fields (\"auditory vection\") and biomechanical vection from stepping along a carrousel-like rotating floor platter. 
In summary, illusory self-motion was shown to indeed facilitate perspective switches and thus spatial orientation. These findings have important implications for both our understanding of human spatial cognition and the design of more effective yet affordable VR simulators. In fact, it might ultimately enable us to relax the need for physical motion in VR by intelligently utilizing self-motion illusions.", "fno": "06180875", "keywords": [], "authors": [ { "affiliation": "Simon Fraser University", "fullName": "Bernhard E. Riecke", "givenName": "Bernhard E.", "surname": "Riecke", "__typename": "ArticleAuthorType" }, { "affiliation": "Simon Fraser University", "fullName": "Daniel Feuereissen", "givenName": "Daniel", "surname": "Feuereissen", "__typename": "ArticleAuthorType" }, { "affiliation": "Vanderbilt University", "fullName": "John J. Rieser", "givenName": "John J.", "surname": "Rieser", "__typename": "ArticleAuthorType" }, { "affiliation": "Vanderbilt University", "fullName": "Timothy P. 
McNamara", "givenName": "Timothy P.", "surname": "McNamara", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2012-03-01T00:00:00", "pubType": "proceedings", "pages": "35-38", "year": "2012", "issn": null, "isbn": "978-1-4673-1247-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06180874", "articleId": "12OmNvjyxBL", "__typename": "AdjacentArticleType" }, "next": { "fno": "06180876", "articleId": "12OmNB6UI9j", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2013/4795/0/06549382", "title": "Poster: Do walking motions enhance visually induced self-motion illusions in virtual reality?", "doi": null, "abstractUrl": "/proceedings-article/vr/2013/06549382/12OmNBr4eym", "parentPublication": { "id": "proceedings/vr/2013/4795/0", "title": "2013 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892307", "title": "Air cushion: A pilot study of the passive technique to mitigate simulator sickness by responding to vection", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892307/12OmNClQ0yz", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2005/8929/0/01492799", "title": "Virtual acceleration with galvanic vestibular stimulation in a virtual reality environment", "doi": null, "abstractUrl": "/proceedings-article/vr/2005/01492799/12OmNwJPMZr", "parentPublication": { "id": "proceedings/vr/2005/8929/0", "title": "IEEE Virtual Reality 2005", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/vr/2005/8929/0/01492765", "title": "Towards lean and elegant self-motion simulation in virtual reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2005/01492765/12OmNxWcHjT", "parentPublication": { "id": "proceedings/vr/2005/8929/0", "title": "IEEE Virtual Reality 2005", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504699", "title": "Induction of linear and circular vection in real and virtual worlds", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504699/12OmNxwnclF", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504732", "title": "Effects of vibrotactile stimulation during virtual sandboarding", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504732/12OmNy50gfw", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pive/2012/1218/0/06229796", "title": "Vertical illusory self-motion through haptic stimulation of the feet", "doi": null, "abstractUrl": "/proceedings-article/pive/2012/06229796/12OmNz5JC1M", "parentPublication": { "id": "proceedings/pive/2012/1218/0", "title": "2012 IEEE VR Workshop on Perceptual Illusions in Virtual Environments", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2008/1971/0/04480760", "title": "Circular, Linear, and Curvilinear Vection in a Large-screen Virtual Environment with Floor Projection", "doi": null, "abstractUrl": "/proceedings-article/vr/2008/04480760/12OmNzAoi4A", "parentPublication": { "id": "proceedings/vr/2008/1971/0", "title": "IEEE Virtual Reality 2008", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" 
}, { "id": "proceedings/vr/2018/3365/0/08446345", "title": "Investigating a Sparse Peripheral Display in a Head-Mounted Display for VR Locomotion", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446345/13bd1fZBGbI", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a814", "title": "Reverse 3D Sound Flow Can Decrease VR Sickness?", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a814/1J7WjyIbnrO", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNqG0SWX", "title": "2015 7th International Conference on Intelligent Technologies for Interactive Entertainment (INTETAIN)", "acronym": "intetain", "groupId": "1808166", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNxWuiji", "doi": "", "title": "Perceptually-inspired computing", "normalizedTitle": "Perceptually-inspired computing", "abstract": "Human sensory systems allow individuals to see, hear, touch, and interact with the surrounding physical environment. Understanding human perception and its limit enables us to better exploit the psychophysics of human perceptual systems to design more efficient, adaptive algorithms and develop perceptually-inspired computational models., In this talk, I will survey some of recent efforts on perceptually-inspired computing with applications to crowd simulation and multimodal interaction. In particular, I will present data-driven personality modeling based on the results of user studies, example-guided physics-based sound synthesis using auditory perception, as well as perceptually-inspired simplification for multimodal interaction. These perceptually guided principles can be used to accelerating multi-modal interaction and visual computing, thereby creating more natural human-computer interaction and providing more immersive experiences. I will also present their use in interactive applications for entertainment, such as video games, computer animation, and shared social experience. I will conclude by discussing possible future research directions.", "abstracts": [ { "abstractType": "Regular", "content": "Human sensory systems allow individuals to see, hear, touch, and interact with the surrounding physical environment. 
Understanding human perception and its limit enables us to better exploit the psychophysics of human perceptual systems to design more efficient, adaptive algorithms and develop perceptually-inspired computational models. In this talk, I will survey some of recent efforts on perceptually-inspired computing with applications to crowd simulation and multimodal interaction. In particular, I will present data-driven personality modeling based on the results of user studies, example-guided physics-based sound synthesis using auditory perception, as well as perceptually-inspired simplification for multimodal interaction. These perceptually guided principles can be used to accelerating multi-modal interaction and visual computing, thereby creating more natural human-computer interaction and providing more immersive experiences. I will also present their use in interactive applications for entertainment, such as video games, computer animation, and shared social experience. I will conclude by discussing possible future research directions.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Human sensory systems allow individuals to see, hear, touch, and interact with the surrounding physical environment. Understanding human perception and its limit enables us to better exploit the psychophysics of human perceptual systems to design more efficient, adaptive algorithms and develop perceptually-inspired computational models. In this talk, I will survey some of recent efforts on perceptually-inspired computing with applications to crowd simulation and multimodal interaction. In particular, I will present data-driven personality modeling based on the results of user studies, example-guided physics-based sound synthesis using auditory perception, as well as perceptually-inspired simplification for multimodal interaction. 
These perceptually guided principles can be used to accelerating multi-modal interaction and visual computing, thereby creating more natural human-computer interaction and providing more immersive experiences. I will also present their use in interactive applications for entertainment, such as video games, computer animation, and shared social experience. I will conclude by discussing possible future research directions.", "fno": "07325476", "keywords": [ "Computational Modeling", "Solid Modeling", "Computer Science", "Adaptation Models", "Entertainment Industry", "Games", "Animation", "Computer Animation", "Perceptually Inspired Computing", "Human Perceptual Systems", "Crowd Simulation", "Multimodal Interaction", "Human Computer Interaction", "Entertainment", "Video Games" ], "authors": [ { "affiliation": "Department of Computer Science University of North Carolina at Chapel Hill and Tsinghua University", "fullName": "Ming C. Lin", "givenName": "Ming C.", "surname": "Lin", "__typename": "ArticleAuthorType" } ], "idPrefix": "intetain", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-06-01T00:00:00", "pubType": "proceedings", "pages": "1-1", "year": "2015", "issn": null, "isbn": "978-1-6319-0061-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07325475", "articleId": "12OmNvkpl4i", "__typename": "AdjacentArticleType" }, "next": { "fno": "07325477", "articleId": "12OmNBpmDGU", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/pact/2015/9524/0/9524a253", "title": "Brain-Inspired Computing", "doi": null, "abstractUrl": "/proceedings-article/pact/2015/9524a253/12OmNCmpcRR", "parentPublication": { "id": "proceedings/pact/2015/9524/0", "title": "2015 International Conference on Parallel Architecture and Compilation (PACT)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2008/2840/0/04637352", "title": "Generating perceptually-correct shadows for mixed reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2008/04637352/12OmNwe2IoN", "parentPublication": { "id": "proceedings/ismar/2008/2840/0", "title": "2008 7th IEEE/ACM International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/whc/2009/3858/0/04810890", "title": "Perceptually augmented simulator design through decomposition", "doi": null, "abstractUrl": "/proceedings-article/whc/2009/04810890/12OmNx965F0", "parentPublication": { "id": "proceedings/whc/2009/3858/0", "title": "World Haptics Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2009/03/ttp2009030458", "title": "A Perceptually Inspired Variational Framework for Color Enhancement", "doi": null, "abstractUrl": "/journal/tp/2009/03/ttp2009030458/13rRUEgs2D6", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2016/02/mcg2016020047", "title": "Evaluating Existing Strategies to Limit Video Game Playing Time", "doi": null, "abstractUrl": "/magazine/cg/2016/02/mcg2016020047/13rRUxlgxPu", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2012/01/tth2012010066", "title": "Perceptually Augmented Simulator Design", "doi": null, "abstractUrl": "/journal/th/2012/01/tth2012010066/13rRUygT7fm", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrhciai/2022/9182/0/918200a134", 
"title": "MIND-VR: A Utility Approach of Human-Computer Interaction in Virtual Space based on Autonomous Consciousness", "doi": null, "abstractUrl": "/proceedings-article/vrhciai/2022/918200a134/1LxffWquCrK", "parentPublication": { "id": "proceedings/vrhciai/2022/9182/0", "title": "2022 International Conference on Virtual Reality, Human-Computer Interaction and Artificial Intelligence (VRHCIAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dcc/1997/7761/0/00582100", "title": "Perceptually lossless image compression", "doi": null, "abstractUrl": "/proceedings-article/dcc/1997/00582100/1dUnbYA1ur6", "parentPublication": { "id": "proceedings/dcc/1997/7761/0", "title": "Data Compression Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ichci/2020/2316/0/231600a105", "title": "Analysis on the Connection Between Nonplayer Character And Artificial Intelligence", "doi": null, "abstractUrl": "/proceedings-article/ichci/2020/231600a105/1tuAb37Ffy0", "parentPublication": { "id": "proceedings/ichci/2020/2316/0", "title": "2020 International Conference on Intelligent Computing and Human-Computer Interaction (ICHCI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sbgames/2021/0189/0/018900a117", "title": "Perception of Personality Traits in Crowds of Virtual Humans", "doi": null, "abstractUrl": "/proceedings-article/sbgames/2021/018900a117/1zusqJ6D3Ne", "parentPublication": { "id": "proceedings/sbgames/2021/0189/0", "title": "2021 20th Brazilian Symposium on Computer Games and Digital Entertainment (SBGames)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzVGcIy", "title": "Proceedings of IEEE Computer Society Conference on Computer Vision and Pattern Recognition", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "1997", "__typename": "ProceedingType" }, "article": { "id": "12OmNywxlQo", "doi": "10.1109/CVPR.1997.609346", "title": "Panoramic mosaics by manifold projection", "normalizedTitle": "Panoramic mosaics by manifold projection", "abstract": "As the field of view of a picture is much smaller than our own visual field of view, it is common to paste together several pictures to create a panoramic mosaic having a larger field of view. Images with a wider field of view can be generated by using fish-eye lens, or panoramic mosaics can be created by special devices which rotate around the camera's optical center (Quicktime VR, Surround Video), or by aligning, and pasting, frames in a video sequence to a single reference frame. Existing mosaicing methods have strong limitations on imaging conditions, and distortions are common. Manifold projection enables the creation of panoramic mosaics from video sequences under more general conditions, and in particular the unrestricted motion of a hand-held camera. The panoramic mosaic is a projection of the scene into a virtual manifold whose structure depends on the camera's motion. This manifold is more general than the customary projections onto a single image plane or onto a cylinder. In addition to being more general than traditional mosaics, manifold projection is also computationally efficient, as the only image deformations used are image plane translations and rotations. 
Real-time, software only, implementation on a Pentium-PC, proves the superior quality and speed of this approach.", "abstracts": [ { "abstractType": "Regular", "content": "As the field of view of a picture is much smaller than our own visual field of view, it is common to paste together several pictures to create a panoramic mosaic having a larger field of view. Images with a wider field of view can be generated by using fish-eye lens, or panoramic mosaics can be created by special devices which rotate around the camera's optical center (Quicktime VR, Surround Video), or by aligning, and pasting, frames in a video sequence to a single reference frame. Existing mosaicing methods have strong limitations on imaging conditions, and distortions are common. Manifold projection enables the creation of panoramic mosaics from video sequences under more general conditions, and in particular the unrestricted motion of a hand-held camera. The panoramic mosaic is a projection of the scene into a virtual manifold whose structure depends on the camera's motion. This manifold is more general than the customary projections onto a single image plane or onto a cylinder. In addition to being more general than traditional mosaics, manifold projection is also computationally efficient, as the only image deformations used are image plane translations and rotations. Real-time, software only, implementation on a Pentium-PC, proves the superior quality and speed of this approach.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "As the field of view of a picture is much smaller than our own visual field of view, it is common to paste together several pictures to create a panoramic mosaic having a larger field of view. 
Images with a wider field of view can be generated by using fish-eye lens, or panoramic mosaics can be created by special devices which rotate around the camera's optical center (Quicktime VR, Surround Video), or by aligning, and pasting, frames in a video sequence to a single reference frame. Existing mosaicing methods have strong limitations on imaging conditions, and distortions are common. Manifold projection enables the creation of panoramic mosaics from video sequences under more general conditions, and in particular the unrestricted motion of a hand-held camera. The panoramic mosaic is a projection of the scene into a virtual manifold whose structure depends on the camera's motion. This manifold is more general than the customary projections onto a single image plane or onto a cylinder. In addition to being more general than traditional mosaics, manifold projection is also computationally efficient, as the only image deformations used are image plane translations and rotations. Real-time, software only, implementation on a Pentium-PC, proves the superior quality and speed of this approach.", "fno": "78220338", "keywords": [ "Image Matching Panoramic Mosaics Manifold Projection Field Of View Fish Eye Lens Camera Optical Center Quicktime VR Surround Video Frame Alignment Video Sequence Single Reference Frame Distortions Hand Held Camera Motion Virtual Manifold Image Deformation Image Plane Translations Image Rotations Real Time System Pentium PC Quality" ], "authors": [ { "affiliation": "Inst. of Comput. Sci., Hebrew Univ., Jerusalem, Israel", "fullName": "S. Peleg", "givenName": "S.", "surname": "Peleg", "__typename": "ArticleAuthorType" }, { "affiliation": "Inst. of Comput. Sci., Hebrew Univ., Jerusalem, Israel", "fullName": "J. 
Herman", "givenName": "J.", "surname": "Herman", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": false, "showBuyMe": true, "hasPdf": true, "pubDate": "1997-06-01T00:00:00", "pubType": "proceedings", "pages": "338", "year": "1997", "issn": "1063-6919", "isbn": "0-8186-7822-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "78220331", "articleId": "12OmNx0RIWi", "__typename": "AdjacentArticleType" }, "next": { "fno": "78220344", "articleId": "12OmNvDI44S", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "1cI6akLvAuQ", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cJ173PsiTS", "doi": "10.1109/VR.2019.8797985", "title": "Demonstration of Perceptually Based Adaptive Motion Retargeting to Animate Real Objects by Light Projection", "normalizedTitle": "Demonstration of Perceptually Based Adaptive Motion Retargeting to Animate Real Objects by Light Projection", "abstract": "A recently developed light projection technique can add dynamic impressions to static real objects without changing their original visual attributes such as surface colors and textures. It produces illusory motion impressions in the projection target by projecting gray-scale motion-inducer patterns that selectively drive the motion detectors in the human visual system. However, with this technique, determining the best deformation sizes is often difficult: When users try to add a large deformation, the deviation in the projected patterns from the original surface pattern on the target object becomes apparent. Therefore, to obtain satisfactory results, they have to spend much time and effort to manually adjust the shift sizes. Here, to overcome this limitation, we propose an optimization framework that adaptively retargets the displacement vectors based on a perceptual model. The perceptual model predicts the subjective inconsistency between a projected pattern and an original one by simulating responses in the human visual system. The displacement vectors are adaptively optimized so that the projection effect is maximized within the tolerable range predicted by the model. 
In the research demonstration, we will present a demo tool that incorporates our optimization technique, where a user can interactively edit dynamic appearances of a real object without cumbersome manual adjustments of deformation sizes.", "abstracts": [ { "abstractType": "Regular", "content": "A recently developed light projection technique can add dynamic impressions to static real objects without changing their original visual attributes such as surface colors and textures. It produces illusory motion impressions in the projection target by projecting gray-scale motion-inducer patterns that selectively drive the motion detectors in the human visual system. However, with this technique, determining the best deformation sizes is often difficult: When users try to add a large deformation, the deviation in the projected patterns from the original surface pattern on the target object becomes apparent. Therefore, to obtain satisfactory results, they have to spend much time and effort to manually adjust the shift sizes. Here, to overcome this limitation, we propose an optimization framework that adaptively retargets the displacement vectors based on a perceptual model. The perceptual model predicts the subjective inconsistency between a projected pattern and an original one by simulating responses in the human visual system. The displacement vectors are adaptively optimized so that the projection effect is maximized within the tolerable range predicted by the model. In the research demonstration, we will present a demo tool that incorporates our optimization technique, where a user can interactively edit dynamic appearances of a real object without cumbersome manual adjustments of deformation sizes.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A recently developed light projection technique can add dynamic impressions to static real objects without changing their original visual attributes such as surface colors and textures. 
It produces illusory motion impressions in the projection target by projecting gray-scale motion-inducer patterns that selectively drive the motion detectors in the human visual system. However, with this technique, determining the best deformation sizes is often difficult: When users try to add a large deformation, the deviation in the projected patterns from the original surface pattern on the target object becomes apparent. Therefore, to obtain satisfactory results, they have to spend much time and effort to manually adjust the shift sizes. Here, to overcome this limitation, we propose an optimization framework that adaptively retargets the displacement vectors based on a perceptual model. The perceptual model predicts the subjective inconsistency between a projected pattern and an original one by simulating responses in the human visual system. The displacement vectors are adaptively optimized so that the projection effect is maximized within the tolerable range predicted by the model. 
In the research demonstration, we will present a demo tool that incorporates our optimization technique, where a user can interactively edit dynamic appearances of a real object without cumbersome manual adjustments of deformation sizes.", "fno": "08797985", "keywords": [ "Computer Animation", "Data Visualisation", "Human Computer Interaction", "Motion Estimation", "Optimisation", "Visual Perception", "Human Visual System", "Displacement Vectors", "Projection Effect", "Optimization Technique", "Deformation Sizes", "Perceptually Based Adaptive Motion Retargeting", "Dynamic Impressions", "Static Real Objects", "Original Visual Attributes", "Surface Colors", "Illusory Motion Impressions", "Projection Target", "Gray Scale Motion Inducer Patterns", "Motion Detectors", "Projected Pattern", "Target Object", "Perceptual Model", "Light Projection Technique", "Surface Pattern", "Real Object Animation", "Strain", "Adaptation Models", "Optimization", "Computational Modeling", "Predictive Models", "Brain Modeling", "Dynamics", "Human Centered Computing", "Human Computer Interaction HCI", "Interaction Paradigms", "Mixed Augmented Reality", "HCI Design And Evaluation Methods", "User Models" ], "authors": [ { "affiliation": "NTT Communication Science Laboratories", "fullName": "Taiki Fukiage", "givenName": "Taiki", "surname": "Fukiage", "__typename": "ArticleAuthorType" }, { "affiliation": "NTT Communication Science Laboratories", "fullName": "Takahiro Kawabe", "givenName": "Takahiro", "surname": "Kawabe", "__typename": "ArticleAuthorType" }, { "affiliation": "NTT Communication Science Laboratories", "fullName": "Shin'ya Nishida", "givenName": "Shin'ya", "surname": "Nishida", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-03-01T00:00:00", "pubType": "proceedings", "pages": "1301-1302", "year": "2019", "issn": null, "isbn": "978-1-7281-1377-7", "notes": null, 
"notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08797884", "articleId": "1cJ0TJmlU9q", "__typename": "AdjacentArticleType" }, "next": { "fno": "08798104", "articleId": "1cJ0GEEyqf6", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cgiv/2010/4166/0/4166a173", "title": "Perceptually-Guided Design of Nonperspectives through Pictorial Depth Cues", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2010/4166a173/12OmNAtaS3V", "parentPublication": { "id": "proceedings/cgiv/2010/4166/0", "title": "2010 Seventh International Conference on Computer Graphics, Imaging and Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icinis/2009/3852/0/3852a031", "title": "Real-Time Speed Variation Detection of Moving Human Using Polar Projection Feature", "doi": null, "abstractUrl": "/proceedings-article/icinis/2009/3852a031/12OmNzuIjrc", "parentPublication": { "id": "proceedings/icinis/2009/3852/0", "title": "Intelligent Networks and Intelligent Systems, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2018/6100/0/610000a674", "title": "Gait Recognition by Deformable Registration", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2018/610000a674/17D45XzbnKw", "parentPublication": { "id": "proceedings/cvprw/2018/6100/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08643571", "title": "Perceptually Based Adaptive Motion Retargeting to Animate Real Objects by Light Projection", "doi": null, "abstractUrl": "/journal/tg/2019/05/08643571/18LF8zpSgUM", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & 
Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nicoint/2022/6908/0/690800a021", "title": "Perceptual Control of Food Taste with Projection Mapping", "doi": null, "abstractUrl": "/proceedings-article/nicoint/2022/690800a021/1FWmZYvi4MM", "parentPublication": { "id": "proceedings/nicoint/2022/6908/0", "title": "2022 Nicograph International (NicoInt)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797923", "title": "Material Surface Reproduction and Perceptual Deformation with Projection Mapping for Car Interior Design", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797923/1cJ0SEdW2Lm", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/03/08839414", "title": "Sparse Data Driven Mesh Deformation", "doi": null, "abstractUrl": "/journal/tg/2021/03/08839414/1dqsrINsJsk", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/eei/2019/4076/0/407600a412", "title": "Multi-Projection Surface Deformation Control Method Based on Elevation Naturalization Model", "doi": null, "abstractUrl": "/proceedings-article/eei/2019/407600a412/1hrJtqAuNa0", "parentPublication": { "id": "proceedings/eei/2019/4076/0", "title": "2019 International Conference on Electronic Engineering and Informatics (EEI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a164", "title": "ElaMorph Projection: Deformation of 3D Shape by Dynamic Projection Mapping", "doi": null, "abstractUrl": 
"/proceedings-article/ismar/2020/850800a164/1pysuGClQ9a", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2021/3864/0/09428129", "title": "Deep Supervised Image Retargeting", "doi": null, "abstractUrl": "/proceedings-article/icme/2021/09428129/1uilCqTvE8U", "parentPublication": { "id": "proceedings/icme/2021/3864/0", "title": "2021 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cI6akLvAuQ", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cJ1fJHCcnu", "doi": "10.1109/VR.2019.8797850", "title": "Perception of Motion-Adaptive Color Images Displayed by a High-Speed DMD Projector", "normalizedTitle": "Perception of Motion-Adaptive Color Images Displayed by a High-Speed DMD Projector", "abstract": "Recent progress of high-speed projectors using DMD (Digital Micromirror Device) has enabled low-latency motion adaptability of displayed images, which is a key challenge in achieving projection-based dynamic interaction systems. This paper presents evaluation of different approaches in achieving fast motion adaptability with DMD projectors through a subjective image evaluation experiment and a discrimination experiment. The results suggest that the approach proposed by the authors, which updates the image position for every binary frame instead of for every video frame, applied to 60-fps video input offers perceptual image quality comparable with the quality offered by 500-fps projection.", "abstracts": [ { "abstractType": "Regular", "content": "Recent progress of high-speed projectors using DMD (Digital Micromirror Device) has enabled low-latency motion adaptability of displayed images, which is a key challenge in achieving projection-based dynamic interaction systems. This paper presents evaluation of different approaches in achieving fast motion adaptability with DMD projectors through a subjective image evaluation experiment and a discrimination experiment. 
The results suggest that the approach proposed by the authors, which updates the image position for every binary frame instead of for every video frame, applied to 60-fps video input offers perceptual image quality comparable with the quality offered by 500-fps projection.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Recent progress of high-speed projectors using DMD (Digital Micromirror Device) has enabled low-latency motion adaptability of displayed images, which is a key challenge in achieving projection-based dynamic interaction systems. This paper presents evaluation of different approaches in achieving fast motion adaptability with DMD projectors through a subjective image evaluation experiment and a discrimination experiment. The results suggest that the approach proposed by the authors, which updates the image position for every binary frame instead of for every video frame, applied to 60-fps video input offers perceptual image quality comparable with the quality offered by 500-fps projection.", "fno": "08797850", "keywords": [ "High Speed Optical Techniques", "Image Colour Analysis", "Interactive Systems", "Micromirrors", "Motion Estimation", "Optical Projectors", "Video Signal Processing", "Visual Perception", "Motion Adaptive Color Images", "High Speed DMD Projector", "Projection Based Dynamic Interaction Systems", "Fast Motion Adaptability", "Subjective Image Evaluation Experiment", "Discrimination Experiment", "Perceptual Image Quality", "Digital Micromirror Device", "Light Sources", "Pulse Width Modulation", "Color", "Image Color Analysis", "Image Quality", "Brightness", "Delays", "Human Centered Computing", "Human Computer Interaction HCI", "Interaction Devices", "Displays And Imagers", "Interaction Paradigms", "Mixed Augmented Reality" ], "authors": [ { "affiliation": "School of Engineering, Tohoku University", "fullName": "Wakana Oshiro", "givenName": "Wakana", "surname": "Oshiro", "__typename": "ArticleAuthorType" }, { 
"affiliation": "Graduate School of Information Sciences, Tohoku University", "fullName": "Shingo Kagami", "givenName": "Shingo", "surname": "Kagami", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Engineering, Tohoku University", "fullName": "Koichi Hashimoto", "givenName": "Koichi", "surname": "Hashimoto", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-03-01T00:00:00", "pubType": "proceedings", "pages": "1790-1793", "year": "2019", "issn": null, "isbn": "978-1-7281-1377-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08797902", "articleId": "1cJ0TDtBm0w", "__typename": "AdjacentArticleType" }, "next": { "fno": "08797858", "articleId": "1cJ0JWkSE3m", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvprw/2011/0529/0/05981787", "title": "Surface depth computation and representation from multiple coded projector light", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2011/05981787/12OmNBWi6Gz", "parentPublication": { "id": "proceedings/cvprw/2011/0529/0", "title": "CVPR 2011 WORKSHOPS", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2011/348/0/06011885", "title": "Novel projector calibration approaches of multi-resolution display", "doi": null, "abstractUrl": "/proceedings-article/icme/2011/06011885/12OmNCd2rEL", "parentPublication": { "id": "proceedings/icme/2011/348/0", "title": "2011 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2015/7082/0/07177434", "title": "Painted face effect removal by a projector-camera system with dynamic ambient light adaptability", "doi": null, "abstractUrl": 
"/proceedings-article/icme/2015/07177434/12OmNqG0T4h", "parentPublication": { "id": "proceedings/icme/2015/7082/0", "title": "2015 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2000/6478/0/64780017", "title": "Achieving Color Uniformity Across Multi-Projector Displays", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2000/64780017/12OmNwlHSVv", "parentPublication": { "id": "proceedings/ieee-vis/2000/6478/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2014/7000/1/7000a337", "title": "High Resolution Projector for 3D Imaging", "doi": null, "abstractUrl": "/proceedings-article/3dv/2014/7000a337/12OmNxZkhvA", "parentPublication": { "id": "proceedings/3dv/2014/7000/2", "title": "2014 2nd International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2007/06/v1360", "title": "A Unified Paradigm For Scalable Multi-Projector Displays", "doi": null, "abstractUrl": "/journal/tg/2007/06/v1360/13rRUNvgz48", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2009/06/ttg2009061317", "title": "Color Seamlessness in Multi-Projector Displays Using Constrained Gamut Morphing", "doi": null, "abstractUrl": "/journal/tg/2009/06/ttg2009061317/13rRUwgQpqH", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2018/6481/0/648101a190", "title": "Multi-projector Resolution Enhancement Through Biased Interpolation", "doi": null, "abstractUrl": 
"/proceedings-article/crv/2018/648101a190/17D45XacGiu", "parentPublication": { "id": "proceedings/crv/2018/6481/0", "title": "2018 15th Conference on Computer and Robot Vision (CRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/05/09714121", "title": "Dynamic Multi-projection Mapping Based on Parallel Intensity Control", "doi": null, "abstractUrl": "/journal/tg/2022/05/09714121/1B0XZ5wr7na", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797714", "title": "PILC Projector: RGB-IR Projector for Pixel-level Infrared Light Communication", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797714/1cJ0L8WggAE", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxE2mTG", "title": "2007 International Conference on Multimedia & Expo", "acronym": "icme", "groupId": "1000477", "volume": "0", "displayVolume": "0", "year": "2007", "__typename": "ProceedingType" }, "article": { "id": "12OmNB9bvdW", "doi": "10.1109/ICME.2007.4285089", "title": "A High Resolution Video Display System by Seamlessly Tiling Multiple Projectors", "normalizedTitle": "A High Resolution Video Display System by Seamlessly Tiling Multiple Projectors", "abstract": "With the rapid advances of digital photography technology, high resolution video can be recorded using video camera for home entertainment and digital cinema. High end projector used for high resolution video display is bulky and expensive, only suitable for fixed installation, and prevents itself from being widely adopted. In this paper, we present one high resolution video display system using multiple projectors to replace the high end projector playback system. Our system is driven by single PC with PCI-E16 x interface and supports at least triple projectors, which is suitable for building surround video display system. It fully exploits the high bandwidth between CPU and graphics card, and efficiently plays back high resolution video of any format supported by commercial video player. We design geometric alignment and photometric correction methods to display the video content seamlessly on the planar display screen. Experimental results show that our system displays high resolution video at its full frame rate.", "abstracts": [ { "abstractType": "Regular", "content": "With the rapid advances of digital photography technology, high resolution video can be recorded using video camera for home entertainment and digital cinema. High end projector used for high resolution video display is bulky and expensive, only suitable for fixed installation, and prevents itself from being widely adopted. 
In this paper, we present one high resolution video display system using multiple projectors to replace the high end projector playback system. Our system is driven by single PC with PCI-E16 x interface and supports at least triple projectors, which is suitable for building surround video display system. It fully exploits the high bandwidth between CPU and graphics card, and efficiently plays back high resolution video of any format supported by commercial video player. We design geometric alignment and photometric correction methods to display the video content seamlessly on the planar display screen. Experimental results show that our system displays high resolution video at its full frame rate.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "With the rapid advances of digital photography technology, high resolution video can be recorded using video camera for home entertainment and digital cinema. High end projector used for high resolution video display is bulky and expensive, only suitable for fixed installation, and prevents itself from being widely adopted. In this paper, we present one high resolution video display system using multiple projectors to replace the high end projector playback system. Our system is driven by single PC with PCI-E16 x interface and supports at least triple projectors, which is suitable for building surround video display system. It fully exploits the high bandwidth between CPU and graphics card, and efficiently plays back high resolution video of any format supported by commercial video player. We design geometric alignment and photometric correction methods to display the video content seamlessly on the planar display screen. 
Experimental results show that our system displays high resolution video at its full frame rate.", "fno": "04285089", "keywords": [ "Computer Displays", "Digital Photography", "Entertainment", "Image Resolution", "Optical Projectors", "Peripheral Interfaces", "Photometry", "Screens Display", "Video Cameras", "High Resolution Video Display System", "Multiple Projector", "Digital Photography", "Video Camera", "Home Entertainment", "Digital Cinema", "High End Projector Playback System", "CPU", "Graphics Card", "Geometric Alignment Method", "Photometric Correction Method", "PCI E 16 X Interface", "Commercial Video Player", "Planar Display Screen", "Decoding", "Motion Pictures", "Transform Coding", "Video Compression", "Computer Displays", "Liquid Crystal Displays", "HDTV", "Plasma Displays", "Computer Graphics", "Software" ], "authors": [ { "affiliation": "Computer Graphics Lab, Software School, Fudan University, China. zdjiang@fudan.edu.cn", "fullName": "Zhongding Jiang", "givenName": "Zhongding", "surname": "Jiang", "__typename": "ArticleAuthorType" }, { "affiliation": "Computer Graphics Lab, Software School, Fudan University, China", "fullName": "Yandong Mao", "givenName": "Yandong", "surname": "Mao", "__typename": "ArticleAuthorType" }, { "affiliation": "Computer Graphics Lab, Software School, Fudan University, China", "fullName": "Bo Qin", "givenName": "Bo", "surname": "Qin", "__typename": "ArticleAuthorType" }, { "affiliation": "Computer Graphics Lab, Software School, Fudan University, China. 
byzang@fudan.edu.cn", "fullName": "Binyu Zang", "givenName": "Binyu", "surname": "Zang", "__typename": "ArticleAuthorType" } ], "idPrefix": "icme", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2007-07-01T00:00:00", "pubType": "proceedings", "pages": "", "year": "2007", "issn": "1945-7871", "isbn": "1-4244-1016-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "04285088", "articleId": "12OmNvjQ8HB", "__typename": "AdjacentArticleType" }, "next": { "fno": "04285090", "articleId": "12OmNvCRgkV", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icme/2006/0366/0/04036621", "title": "Automatic Geometric and Photometric Calibration for Tiling Multiple Projectors with a Pan-Tilt-Zoom Camera", "doi": null, "abstractUrl": "/proceedings-article/icme/2006/04036621/12OmNANBZqd", "parentPublication": { "id": "proceedings/icme/2006/0366/0", "title": "2006 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icig/2004/2244/0/01410480", "title": "A survey of multi-projector tiled display wall construction", "doi": null, "abstractUrl": "/proceedings-article/icig/2004/01410480/12OmNAWH9up", "parentPublication": { "id": "proceedings/icig/2004/2244/0", "title": "Proceedings. 
Third International Conference on Image and Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2011/0529/0/05981693", "title": "Prototyping a light field display involving direct observation of a video projector array", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2011/05981693/12OmNB1wkNJ", "parentPublication": { "id": "proceedings/cvprw/2011/0529/0", "title": "CVPR 2011 WORKSHOPS", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cmpeur/1989/1940/0/00093361", "title": "Information display-an overview and trends", "doi": null, "abstractUrl": "/proceedings-article/cmpeur/1989/00093361/12OmNBr4eI3", "parentPublication": { "id": "proceedings/cmpeur/1989/1940/0", "title": "COMPEURO 89 Proceedings VLSI and Computer Peripherals.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2011/348/0/06011885", "title": "Novel projector calibration approaches of multi-resolution display", "doi": null, "abstractUrl": "/proceedings-article/icme/2011/06011885/12OmNCd2rEL", "parentPublication": { "id": "proceedings/icme/2011/348/0", "title": "2011 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrais/1993/1363/0/00380757", "title": "A 1\" high resolution field sequential display for head mounted applications", "doi": null, "abstractUrl": "/proceedings-article/vrais/1993/00380757/12OmNsbY6S1", "parentPublication": { "id": "proceedings/vrais/1993/1363/0", "title": "Virtual Reality Annual International Symposium", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ainaw/2007/2847/2/04224229", "title": "Tele-Immersive Collaboration Using High-Resolution Video in Tiled Displays Environment", "doi": null, "abstractUrl": 
"/proceedings-article/ainaw/2007/04224229/12OmNwCsdLk", "parentPublication": { "id": "proceedings/ainaw/2007/2847/2", "title": "Advanced Information Networking and Applications Workshops, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2010/7029/0/05543467", "title": "Display gamut reshaping for color emulation and balancing", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2010/05543467/12OmNyr8Ynx", "parentPublication": { "id": "proceedings/cvprw/2010/7029/0", "title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08643566", "title": "Temporal Resolution Multiplexing: Exploiting the limitations of spatio-temporal vision for more efficient VR rendering", "doi": null, "abstractUrl": "/journal/tg/2019/05/08643566/18bmPXA5Ik0", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/mu/2020/01/08889397", "title": "Glasses-Free 3-D and Augmented Reality Display Advances: From Theory to Implementation", "doi": null, "abstractUrl": "/magazine/mu/2020/01/08889397/1ezPlyZdxeM", "parentPublication": { "id": "mags/mu", "title": "IEEE MultiMedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNy6qfOu", "title": "Visualization Conference, IEEE", "acronym": "ieee-vis", "groupId": "1000796", "volume": "0", "displayVolume": "0", "year": "1999", "__typename": "ProceedingType" }, "article": { "id": "12OmNynJMIq", "doi": "10.1109/VISUAL.1999.809900", "title": "LOD-Sprite Technique for Accelerated Terrain Rendering", "normalizedTitle": "LOD-Sprite Technique for Accelerated Terrain Rendering", "abstract": "We present a new rendering technique, termed LOD-sprite rendering, which uses a combination of a level-of-detail (LOD) representation of the scene together with reusing image sprites (previously rendered images). Our primary application is accelerating terrain rendering. The LOD-sprite technique renders an initial frame using a high-resolution model of the scene geometry. It renders subsequent frames with a much lower-resolution model of the scene geometry and texture-maps each polygon with the image sprite from the initial high-resolution frame. As it renders these subsequent frames the technique measures the error associated with the divergence of the view position from the position where the initial frame was rendered. Once this error exceeds a user-defined threshold, the technique re-renders the scene from the high-resolution model. We have efficiently implemented the LOD-sprite technique with texture-mapping graphics hardware. Although to date we have only applied LOD-sprite to terrain rendering, it could easily be extended to other applications. We feel LOD-sprite holds particular promise for real-time rendering systems.", "abstracts": [ { "abstractType": "Regular", "content": "We present a new rendering technique, termed LOD-sprite rendering, which uses a combination of a level-of-detail (LOD) representation of the scene together with reusing image sprites (previously rendered images). Our primary application is accelerating terrain rendering. 
The LOD-sprite technique renders an initial frame using a high-resolution model of the scene geometry. It renders subsequent frames with a much lower-resolution model of the scene geometry and texture-maps each polygon with the image sprite from the initial high-resolution frame. As it renders these subsequent frames the technique measures the error associated with the divergence of the view position from the position where the initial frame was rendered. Once this error exceeds a user-defined threshold, the technique re-renders the scene from the high-resolution model. We have efficiently implemented the LOD-sprite technique with texture-mapping graphics hardware. Although to date we have only applied LOD-sprite to terrain rendering, it could easily be extended to other applications. We feel LOD-sprite holds particular promise for real-time rendering systems.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a new rendering technique, termed LOD-sprite rendering, which uses a combination of a level-of-detail (LOD) representation of the scene together with reusing image sprites (previously rendered images). Our primary application is accelerating terrain rendering. The LOD-sprite technique renders an initial frame using a high-resolution model of the scene geometry. It renders subsequent frames with a much lower-resolution model of the scene geometry and texture-maps each polygon with the image sprite from the initial high-resolution frame. As it renders these subsequent frames the technique measures the error associated with the divergence of the view position from the position where the initial frame was rendered. Once this error exceeds a user-defined threshold, the technique re-renders the scene from the high-resolution model. We have efficiently implemented the LOD-sprite technique with texture-mapping graphics hardware. 
Although to date we have only applied LOD-sprite to terrain rendering, it could easily be extended to other applications. We feel LOD-sprite holds particular promise for real-time rendering systems.", "fno": "58970049", "keywords": [ "Image Based Modeling And Rendering", "Texture Mapping", "Acceleration Techniques", "Multi Resolution", "Level Of Detail", "Terrain Rendering", "Virtual Reality", "Virtual Environments" ], "authors": [ { "affiliation": "State University of New York at Stony Brook", "fullName": "Baoquan Chen", "givenName": "Baoquan", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": "The Naval Research Laboratory", "fullName": "J. Edward Swan II", "givenName": "J. Edward", "surname": "Swan II", "__typename": "ArticleAuthorType" }, { "affiliation": "The Naval Research Laboratory", "fullName": "Eddy Kuo", "givenName": "Eddy", "surname": "Kuo", "__typename": "ArticleAuthorType" }, { "affiliation": "State University of New York at Stony Brook", "fullName": "Arie Kaufman", "givenName": "Arie", "surname": "Kaufman", "__typename": "ArticleAuthorType" } ], "idPrefix": "ieee-vis", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "1999-10-01T00:00:00", "pubType": "proceedings", "pages": "49", "year": "1999", "issn": "1070-2385", "isbn": "0-7803-5897-X", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "58970048", "articleId": "12OmNCwUmwn", "__typename": "AdjacentArticleType" }, "next": { "fno": "58970050", "articleId": "12OmNvkpkRn", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ieee-vis/2004/8788/0/87880417", "title": "LoD Volume Rendering of FEA Data", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2004/87880417/12OmNAZfxGz", "parentPublication": { "id": "proceedings/ieee-vis/2004/8788/0", "title": "Visualization 
Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2000/0478/0/04780197", "title": "Interactive Stereoscopic Rendering of Voxel-Based Terrain", "doi": null, "abstractUrl": "/proceedings-article/vr/2000/04780197/12OmNBPc8wv", "parentPublication": { "id": "proceedings/vr/2000/0478/0", "title": "Virtual Reality Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/esiat/2009/3682/3/3682c716", "title": "Ringlike Level of Detail in Real-Time Terrain Rendering", "doi": null, "abstractUrl": "/proceedings-article/esiat/2009/3682c716/12OmNBtCCDl", "parentPublication": { "id": "proceedings/esiat/2009/3682/3", "title": "Environmental Science and Information Application Technology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sutc/2010/4049/0/4049a442", "title": "Design and Implementation of Adaptive Rendering Engine for Large Scale 3D-Terrain Data", "doi": null, "abstractUrl": "/proceedings-article/sutc/2010/4049a442/12OmNC1Guik", "parentPublication": { "id": "proceedings/sutc/2010/4049/0", "title": "Sensor Networks, Ubiquitous, and Trustworthy Computing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccet/2009/3521/2/3521b003", "title": "Improved Error Metric of Terrain Rendering for Flying High Over the Terrain", "doi": null, "abstractUrl": "/proceedings-article/iccet/2009/3521b003/12OmNrJRP5e", "parentPublication": { "id": "proceedings/iccet/2009/3521/1", "title": "Computer Engineering and Technology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2015/7143/0/7143a901", "title": "Research on Terrain Visualization Based on LOD Dynamic", "doi": null, "abstractUrl": 
"/proceedings-article/icmtma/2015/7143a901/12OmNvJXezD", "parentPublication": { "id": "proceedings/icmtma/2015/7143/0", "title": "2015 Seventh International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcasia/2004/2138/0/21380280", "title": "VIsualization for HPC Data - Large Terrain Model", "doi": null, "abstractUrl": "/proceedings-article/hpcasia/2004/21380280/12OmNy50g4O", "parentPublication": { "id": "proceedings/hpcasia/2004/2138/0", "title": "High Performance Computing and Grid in Asia Pacific Region, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2000/6478/0/64780041", "title": "Texturing Techniques for Terrain Visualization", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2000/64780041/12OmNzVXNRv", "parentPublication": { "id": "proceedings/ieee-vis/2000/6478/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nana/2022/6131/0/613100a489", "title": "Class Continuous LOD Algorithm for Lightweight WebGL Rendering Optimization", "doi": null, "abstractUrl": "/proceedings-article/nana/2022/613100a489/1JwPCv7gTNS", "parentPublication": { "id": "proceedings/nana/2022/6131/0", "title": "2022 International Conference on Networking and Network Applications (NaNA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2017/2636/0/263600a397", "title": "Vegetation Rendering Optimization for Virtual Reality Systems", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2017/263600a397/1ap5wyffDYA", "parentPublication": { "id": "proceedings/icvrv/2017/2636/0", "title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwJPMY7", "title": "IEEE Conference on Advanced Video and Signal Based Surveillance, 2005.", "acronym": "avss", "groupId": "1001307", "volume": "0", "displayVolume": "0", "year": "2005", "__typename": "ProceedingType" }, "article": { "id": "12OmNyuPLog", "doi": "10.1109/AVSS.2005.1577311", "title": "Dual-sensor camera for acquiring image sequences with different spatio-temporal resolution", "normalizedTitle": "Dual-sensor camera for acquiring image sequences with different spatio-temporal resolution", "abstract": "In accordance with advances in camera technology, the requirement of high-quality video has greatly increased. Among the factors required for high-quality video are a high-resolution and a high frame rate. However, the limitation of the pixel transfer rate restricts compatibility of a high-resolution and a high frame rate in commercial camera. We propose a dual-sensor camera that consists of two cameras: one with a high-resolution and a low frame rate and the other with a low-resolution and a high frame rate. The system is capable of capturing two different image sequences: high-resolution images and high-frame-rate images. A sensor calibration for the dual-sensor camera is also proposed.", "abstracts": [ { "abstractType": "Regular", "content": "In accordance with advances in camera technology, the requirement of high-quality video has greatly increased. Among the factors required for high-quality video are a high-resolution and a high frame rate. However, the limitation of the pixel transfer rate restricts compatibility of a high-resolution and a high frame rate in commercial camera. We propose a dual-sensor camera that consists of two cameras: one with a high-resolution and a low frame rate and the other with a low-resolution and a high frame rate. The system is capable of capturing two different image sequences: high-resolution images and high-frame-rate images. 
A sensor calibration for the dual-sensor camera is also proposed.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In accordance with advances in camera technology, the requirement of high-quality video has greatly increased. Among the factors required for high-quality video are a high-resolution and a high frame rate. However, the limitation of the pixel transfer rate restricts compatibility of a high-resolution and a high frame rate in commercial camera. We propose a dual-sensor camera that consists of two cameras: one with a high-resolution and a low frame rate and the other with a low-resolution and a high frame rate. The system is capable of capturing two different image sequences: high-resolution images and high-frame-rate images. A sensor calibration for the dual-sensor camera is also proposed.", "fno": "01577311", "keywords": [ "Sensor Calibration", "Dual Sensor Camera", "Image Sequences", "Spatio Temporal Resolution", "High Resolution Images", "High Frame Rate Images" ], "authors": [ { "affiliation": "Graduate Sch. of Eng. Sci., Osaka Univ., Japan", "fullName": "H. Nagahara", "givenName": "H.", "surname": "Nagahara", "__typename": "ArticleAuthorType" }, { "affiliation": "Graduate Sch. of Eng. Sci., Osaka Univ., Japan", "fullName": "A. Hoshikawa", "givenName": "A.", "surname": "Hoshikawa", "__typename": "ArticleAuthorType" }, { "affiliation": "Graduate Sch. of Eng. Sci., Osaka Univ., Japan", "fullName": "T. Shigemoto", "givenName": "T.", "surname": "Shigemoto", "__typename": "ArticleAuthorType" }, { "affiliation": "Graduate Sch. of Eng. Sci., Osaka Univ., Japan", "fullName": "Y. Iwai", "givenName": "Y.", "surname": "Iwai", "__typename": "ArticleAuthorType" }, { "affiliation": "Graduate Sch. of Eng. Sci., Osaka Univ., Japan", "fullName": "M. Yachida", "givenName": "M.", "surname": "Yachida", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "H. 
Tanaka", "givenName": "H.", "surname": "Tanaka", "__typename": "ArticleAuthorType" } ], "idPrefix": "avss", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2005-09-01T00:00:00", "pubType": "proceedings", "pages": "450-455", "year": "2005", "issn": null, "isbn": "0-7803-9385-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "01577310", "articleId": "12OmNC3FGks", "__typename": "AdjacentArticleType" }, "next": { "fno": "01577312", "articleId": "12OmNxzuMHZ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccit/2009/3896/0/3896a406", "title": "Acquiring High-Resolution Face Image through Detection and Focusing", "doi": null, "abstractUrl": "/proceedings-article/iccit/2009/3896a406/12OmNBkxsrU", "parentPublication": { "id": "proceedings/iccit/2009/3896/0", "title": "Convergence Information Technology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2017/2818/0/2818a292", "title": "Scale-Corrected Background Modeling", "doi": null, "abstractUrl": "/proceedings-article/crv/2017/2818a292/12OmNC2fGq7", "parentPublication": { "id": "proceedings/crv/2017/2818/0", "title": "2017 14th Conference on Computer and Robot Vision (CRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2011/0394/0/05995542", "title": "P2C2: Programmable pixel compressive camera for high speed imaging", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2011/05995542/12OmNCvumOo", "parentPublication": { "id": "proceedings/cvpr/2011/0394/0", "title": "CVPR 2011", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/1996/7620/0/76200162", "title": "Based on 2D Spatio-Temporal Images", "doi": null, 
"abstractUrl": "/proceedings-article/wacv/1996/76200162/12OmNx76TTx", "parentPublication": { "id": "proceedings/wacv/1996/7620/0", "title": "Applications of Computer Vision, IEEE Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2011/0394/0/05995418", "title": "High resolution multispectral video capture with a hybrid camera system", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2011/05995418/12OmNzC5SSM", "parentPublication": { "id": "proceedings/cvpr/2011/0394/0", "title": "CVPR 2011", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2011/4548/0/4548a242", "title": "Spatio-temporal Resolution Enhancement of Vocal Tract MRI Sequences Based on the Wiener Filter", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2011/4548a242/12OmNzXFoFt", "parentPublication": { "id": "proceedings/sibgrapi/2011/4548/0", "title": "2011 24th SIBGRAPI Conference on Graphics, Patterns and Images", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2015/6759/0/07301375", "title": "Video compressive sensing with on-chip programmable subsampling", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2015/07301375/12OmNzmclG5", "parentPublication": { "id": "proceedings/cvprw/2015/6759/0", "title": "2015 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/avss/2022/6382/0/09959711", "title": "Dual Camera Based High Spatio-Temporal Resolution Video Generation For Wide Area Surveillance", "doi": null, "abstractUrl": "/proceedings-article/avss/2022/09959711/1Iz59B0m2as", "parentPublication": { "id": "proceedings/avss/2022/6382/0", "title": "2022 18th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2021/10/09052471", "title": "A Dual Camera System for High Spatiotemporal Resolution Video Acquisition", "doi": null, "abstractUrl": "/journal/tp/2021/10/09052471/1iFLGJyUQ1O", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2020/07/09070203", "title": "Deep Slow Motion Video Reconstruction With Hybrid Imaging System", "doi": null, "abstractUrl": "/journal/tp/2020/07/09070203/1j6k8byBiJW", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxwWorE", "title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops", "acronym": "iccvw", "groupId": "1800041", "volume": "0", "displayVolume": "0", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNzTH0ZC", "doi": "10.1109/ICCVW.2009.5457481", "title": "Spatio-temporal image-based texture atlases for dynamic 3-D models", "normalizedTitle": "Spatio-temporal image-based texture atlases for dynamic 3-D models", "abstract": "In this paper, we propose a method for creating a high-quality spatio-temporal texture atlas from a dynamic 3-D model and a set of calibrated video sequences. By adopting an actual spatio-temporal perspective, beyond independent frame-by-frame computations, we fully exploit the very high redundancy in the input video sequences. First, we drastically cut down on the amount of texture data, and thereby we greatly enhance the portability and the rendering efficiency of the model. Second, we gather the numerous different viewpoint/time appearances of the scene, so as to recover from low resolution, grazing views, highlights, shadows and occlusions which affect some regions of the spatio-temporal model. Altogether, our method allows the synthesis of novel views from a small quantity of texture data, with an optimal visual quality throughout the sequence, with minimally visible color discontinuities, and without flickering artifacts. These properties are demonstrated on real datasets.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we propose a method for creating a high-quality spatio-temporal texture atlas from a dynamic 3-D model and a set of calibrated video sequences. By adopting an actual spatio-temporal perspective, beyond independent frame-by-frame computations, we fully exploit the very high redundancy in the input video sequences. 
First, we drastically cut down on the amount of texture data, and thereby we greatly enhance the portability and the rendering efficiency of the model. Second, we gather the numerous different viewpoint/time appearances of the scene, so as to recover from low resolution, grazing views, highlights, shadows and occlusions which affect some regions of the spatio-temporal model. Altogether, our method allows the synthesis of novel views from a small quantity of texture data, with an optimal visual quality throughout the sequence, with minimally visible color discontinuities, and without flickering artifacts. These properties are demonstrated on real datasets.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we propose a method for creating a high-quality spatio-temporal texture atlas from a dynamic 3-D model and a set of calibrated video sequences. By adopting an actual spatio-temporal perspective, beyond independent frame-by-frame computations, we fully exploit the very high redundancy in the input video sequences. First, we drastically cut down on the amount of texture data, and thereby we greatly enhance the portability and the rendering efficiency of the model. Second, we gather the numerous different viewpoint/time appearances of the scene, so as to recover from low resolution, grazing views, highlights, shadows and occlusions which affect some regions of the spatio-temporal model. Altogether, our method allows the synthesis of novel views from a small quantity of texture data, with an optimal visual quality throughout the sequence, with minimally visible color discontinuities, and without flickering artifacts. 
These properties are demonstrated on real datasets.", "fno": "05457481", "keywords": [ "Layout", "Video Sequences", "Reflectivity", "Cameras", "Spatiotemporal Phenomena", "Redundancy", "Conferences", "Mesh Generation", "Animation", "Veins" ], "authors": [ { "affiliation": "IMAGINE Université Paris-Est and INRIA Rhône-Alpes / LJK, Grenoble, France", "fullName": "Zsolt Jankó", "givenName": "Zsolt", "surname": "Jankó", "__typename": "ArticleAuthorType" }, { "affiliation": "IMAGINE Université Paris-Est, CSTB, Sophia-Antipolis, France", "fullName": "Jean-Philippe Pons", "givenName": "Jean-Philippe", "surname": "Pons", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccvw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-09-01T00:00:00", "pubType": "proceedings", "pages": "1646-1653", "year": "2009", "issn": null, "isbn": "978-1-4244-4442-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05457480", "articleId": "12OmNyRg4sM", "__typename": "AdjacentArticleType" }, "next": { "fno": "05457482", "articleId": "12OmNvrdI0F", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/hicss/2008/3075/0/04438842", "title": "Video Content Description Using Fuzzy Spatio-temporal Relations", "doi": null, "abstractUrl": "/proceedings-article/hicss/2008/04438842/12OmNBEpnAz", "parentPublication": { "id": "proceedings/hicss/2008/3075/0", "title": "Proceedings of the 41st Annual Hawaii International Conference on System Sciences (HICSS 2008)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/1992/2920/0/00202099", "title": "Quantitative 3-D texture analysis of interphase cell nuclei", "doi": null, "abstractUrl": "/proceedings-article/icpr/1992/00202099/12OmNxE2mIU", "parentPublication": { "id": 
"proceedings/icpr/1992/2920/0", "title": "11th IAPR International Conference on Pattern Recognition. Vol. III. Conference C: Image, Speech and Signal Analysis,", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icig/2009/3883/0/3883a293", "title": "Dynamic Texture Segmentation Using 3-D Fourier Transform", "doi": null, "abstractUrl": "/proceedings-article/icig/2009/3883a293/12OmNxwncaD", "parentPublication": { "id": "proceedings/icig/2009/3883/0", "title": "Image and Graphics, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acct/2015/8488/0/8488a395", "title": "Spatio-Temporal Data Models with Their Different Approaches and Their Features", "doi": null, "abstractUrl": "/proceedings-article/acct/2015/8488a395/12OmNyXMQch", "parentPublication": { "id": "proceedings/acct/2015/8488/0", "title": "2015 Fifth International Conference on Advanced Computing & Communication Technologies (ACCT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200h456", "title": "Spatio-Temporal Dynamic Inference Network for Group Activity Recognition", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200h456/1BmJuUSFXck", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859741", "title": "Spatio-Temporal Self-Supervision Enhanced Transformer Networks for Action Recognition", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859741/1G9EGdZv2y4", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { 
"id": "proceedings/cvpr/2022/6946/0/694600n3957", "title": "Contextualized Spatio-Temporal Contrastive Learning with Self-Supervision", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600n3957/1H1k3U73keQ", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956271", "title": "AutoMF: Spatio-temporal Architecture Search for The Meteorological Forecasting Task", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956271/1IHpqeCEi5y", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2020/8316/0/831600a032", "title": "Defending Water Treatment Networks: Exploiting Spatio-Temporal Effects for Cyber Attack Detection", "doi": null, "abstractUrl": "/proceedings-article/icdm/2020/831600a032/1r54BHgnF96", "parentPublication": { "id": "proceedings/icdm/2020/8316/0", "title": "2020 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2021/8808/0/09412714", "title": "Part-based Collaborative Spatio-temporal Feature Learning for Cloth-changing Gait Recognition", "doi": null, "abstractUrl": "/proceedings-article/icpr/2021/09412714/1tmhw3rrsre", "parentPublication": { "id": "proceedings/icpr/2021/8808/0", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1LMbvP5o2iI", "title": "2023 IEEE International Symposium on High-Performance Computer Architecture (HPCA)", "acronym": "hpca", "groupId": "10070856", "volume": "0", "displayVolume": "0", "year": "2023", "__typename": "ProceedingType" }, "article": { "id": "1LMbGLjCyqc", "doi": "10.1109/HPCA56546.2023.10071097", "title": "Post0-VR: Enabling Universal Realistic Rendering for Modern VR via Exploiting Architectural Similarity and Data Sharing", "normalizedTitle": "Post0-VR: Enabling Universal Realistic Rendering for Modern VR via Exploiting Architectural Similarity and Data Sharing", "abstract": "To provide users with a fully immersive environment, VR post-processing, which adds numerous realistic effects on the frame after rendering, plays a key role in modern VR systems. Current post-processing is processed separately from normal rendering by the graphics processing unit (GPU). As a result, the GPU needs to first render a high-resolution frame and then add the post-processing effects within a very short time frame. Our in-depth experimental results on commercial VR products demonstrate that the post-processing in VR applications extends the VR frame time by approximately 2X on average. Furthermore, the ever-increasing resolution requirements of modern VR significantly increase the workloads for post-processing in the execution pipeline. This long delay causes VR real-time execution to frequently miss the critical frame-time deadline, thus hurting users’ quality of experience.Based on the analysis of VR post-processing workflow and its common realistic effects, we observe that post-processing shares the same hardware pipeline with normal rendering, and even reuses the intermediate data produced by normal rendering. 
To fully utilize this hardware-level similarity and capture the data locality, we propose a novel universal realistic rendering architecture for VR, named Post0-VR, which eliminates post-processing by directly merging the common realistic effects into the normal rendering process. Based on our newly proposed VR architecture design, we further propose a dynamic accuracy adjustment method to simplify the normal rendering without hurting users’ perception. The evaluation results on real-world applications demonstrate that Post0-VR can support different types of realistic effects while significantly improving the overall VR rendering performance.", "abstracts": [ { "abstractType": "Regular", "content": "To provide users with a fully immersive environment, VR post-processing, which adds numerous realistic effects on the frame after rendering, plays a key role in modern VR systems. Current post-processing is processed separately from normal rendering by the graphics processing unit (GPU). As a result, the GPU needs to first render a high-resolution frame and then add the post-processing effects within a very short time frame. Our in-depth experimental results on commercial VR products demonstrate that the post-processing in VR applications extends the VR frame time by approximately 2X on average. Furthermore, the ever-increasing resolution requirements of modern VR significantly increase the workloads for post-processing in the execution pipeline. This long delay causes VR real-time execution to frequently miss the critical frame-time deadline, thus hurting users’ quality of experience.Based on the analysis of VR post-processing workflow and its common realistic effects, we observe that post-processing shares the same hardware pipeline with normal rendering, and even reuses the intermediate data produced by normal rendering. 
To fully utilize this hardware-level similarity and capture the data locality, we propose a novel universal realistic rendering architecture for VR, named Post0-VR, which eliminates post-processing by directly merging the common realistic effects into the normal rendering process. Based on our newly proposed VR architecture design, we further propose a dynamic accuracy adjustment method to simplify the normal rendering without hurting users’ perception. The evaluation results on real-world applications demonstrate that Post0-VR can support different types of realistic effects while significantly improving the overall VR rendering performance.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "To provide users with a fully immersive environment, VR post-processing, which adds numerous realistic effects on the frame after rendering, plays a key role in modern VR systems. Current post-processing is processed separately from normal rendering by the graphics processing unit (GPU). As a result, the GPU needs to first render a high-resolution frame and then add the post-processing effects within a very short time frame. Our in-depth experimental results on commercial VR products demonstrate that the post-processing in VR applications extends the VR frame time by approximately 2X on average. Furthermore, the ever-increasing resolution requirements of modern VR significantly increase the workloads for post-processing in the execution pipeline. This long delay causes VR real-time execution to frequently miss the critical frame-time deadline, thus hurting users’ quality of experience.Based on the analysis of VR post-processing workflow and its common realistic effects, we observe that post-processing shares the same hardware pipeline with normal rendering, and even reuses the intermediate data produced by normal rendering. 
To fully utilize this hardware-level similarity and capture the data locality, we propose a novel universal realistic rendering architecture for VR, named Post0-VR, which eliminates post-processing by directly merging the common realistic effects into the normal rendering process. Based on our newly proposed VR architecture design, we further propose a dynamic accuracy adjustment method to simplify the normal rendering without hurting users’ perception. The evaluation results on real-world applications demonstrate that Post0-VR can support different types of realistic effects while significantly improving the overall VR rendering performance.", "fno": "10071097", "keywords": [ "Graphics Processing Units", "Image Resolution", "Real Time Systems", "Rendering Computer Graphics", "Virtual Reality", "Commercial VR Products", "Common Realistic Effects", "Critical Frame Time Deadline", "Current Post Processing", "Enabling Universal Realistic Rendering", "Graphics Processing Unit", "High Resolution Frame", "Modern VR Systems", "Named Post 0 VR", "Normal Rendering Process", "Numerous Realistic Effects", "Post Processing Effects", "Post Processing Shares", "Render", "Short Time Frame", "Universal Realistic Rendering Architecture", "VR Applications", "VR Frame Time", "VR Post Processing Workflow", "VR Rendering Performance", "Pipelines", "Merging", "Graphics Processing Units", "Computer Architecture", "Virtual Reality", "Quality Of Service", "Rendering Computer Graphics" ], "authors": [ { "affiliation": "University of Houston,ECMOS Lab,ECE Department", "fullName": "Yu Wen", "givenName": "Yu", "surname": "Wen", "__typename": "ArticleAuthorType" }, { "affiliation": "Beihang University,School of Computer Science and Engineering", "fullName": "Chenhao Xie", "givenName": "Chenhao", "surname": "Xie", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Sydney,FSA Lab, School of Computer Science", "fullName": "Shuaiwen Leon Song", "givenName": "Shuaiwen Leon", 
"surname": "Song", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Houston,ECMOS Lab,ECE Department", "fullName": "Xin Fu", "givenName": "Xin", "surname": "Fu", "__typename": "ArticleAuthorType" } ], "idPrefix": "hpca", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2023-02-01T00:00:00", "pubType": "proceedings", "pages": "390-402", "year": "2023", "issn": null, "isbn": "978-1-6654-7652-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "10070999", "articleId": "1LMbzYX6Uww", "__typename": "AdjacentArticleType" }, "next": { "fno": "10070940", "articleId": "1LMbFlbsgwg", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/3dui/2012/1204/0/06184187", "title": "Democratizing rendering for multiple viewers in surround VR systems", "doi": null, "abstractUrl": "/proceedings-article/3dui/2012/06184187/12OmNBubOX9", "parentPublication": { "id": "proceedings/3dui/2012/1204/0", "title": "2012 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/searis/2014/9955/0/07152799", "title": "guacamole - An extensible scene graph and rendering framework based on deferred shading", "doi": null, "abstractUrl": "/proceedings-article/searis/2014/07152799/12OmNzA6GLj", "parentPublication": { "id": "proceedings/searis/2014/9955/0", "title": "2014 IEEE 7th Workshop on Software Engineering and Architectures for Realtime Interactive Systems (SEARIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08643566", "title": "Temporal Resolution Multiplexing: Exploiting the limitations of spatio-temporal vision for more efficient VR rendering", "doi": null, "abstractUrl": "/journal/tg/2019/05/08643566/18bmPXA5Ik0", 
"parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a160", "title": "Portal Rendering and Creation Interactions in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a160/1JrR7uagUqQ", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2017/2636/0/263600a397", "title": "Vegetation Rendering Optimization for Virtual Reality Systems", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2017/263600a397/1ap5wyffDYA", "parentPublication": { "id": "proceedings/icvrv/2017/2636/0", "title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/05/09005240", "title": "Eye-dominance-guided Foveated Rendering", "doi": null, "abstractUrl": "/journal/tg/2020/05/09005240/1hzNcOce8OQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090471", "title": "Efficient Peripheral Flicker Reduction for Foveated Rendering in Mobile VR Systems", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090471/1jIxm9DsWDS", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2020/8014/0/801400a106", "title": "FAVR - Accelerating Direct Volume Rendering for Virtual 
RealitySystems", "doi": null, "abstractUrl": "/proceedings-article/vis/2020/801400a106/1qRNBEWTyEw", "parentPublication": { "id": "proceedings/vis/2020/8014/0", "title": "2020 IEEE Visualization Conference (VIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icisce/2020/6406/0/640600a027", "title": "A Hybrid Compress Method of STL Mesh for Realtime VR Visulization", "doi": null, "abstractUrl": "/proceedings-article/icisce/2020/640600a027/1x3kudJk5EI", "parentPublication": { "id": "proceedings/icisce/2020/6406/0", "title": "2020 7th International Conference on Information Science and Control Engineering (ICISCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2021/4065/0/406500a117", "title": "Smoke Diffusion Simulation and Physically-Based Rendering for VR", "doi": null, "abstractUrl": "/proceedings-article/cw/2021/406500a117/1yBF4dk6rf2", "parentPublication": { "id": "proceedings/cw/2021/4065/0", "title": "2021 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "13bd1eJgoia", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "13bd1fHrlRE", "doi": "10.1109/VR.2018.8446383", "title": "Simulated Reference Frame: A Cost-Effective Solution to Improve Spatial Orientation in VR", "normalizedTitle": "Simulated Reference Frame: A Cost-Effective Solution to Improve Spatial Orientation in VR", "abstract": "Virtual Reality (VR) is increasingly used in spatial cognition research, as it offers high experimental control in naturalistic multimodal environments, which is hard to achieve in real-world settings. Although recent technological advances offer a high level of photorealism, locomotion in VR is still restricted because people might not perceive their self-motion as they would in the real world. This might be related to the inability to use embodied spatial orientation processes, which support automatic and obligatory updating of our spatial awareness. Previous research has identified the roles reference frames play in retaining spatial orientation. Here, we propose using visually overlaid rectangular boxes, simulating reference frames in VR, to provide users with a better insight into spatial direction in landmark-free virtual environments. The current mixed-method study investigated how different variations of the visually simulated reference frames might support people in a navigational search task. Performance results showed that the existence of a simulated reference frame yields significant effects on participants completion time and travel distance. 
Though a simulated CAVE translating with the navigator (one of the simulated reference frames) did not provide significant benefits, the simulated room (another simulated reference frame depicting a rest frame) significantly boosted user performance in the task as well as improved participants preference in the post-experiment evaluation. Results suggest that adding a visually simulated reference frame to VR applications might be a cost-effective solution to the spatial disorientation problem in VR.", "abstracts": [ { "abstractType": "Regular", "content": "Virtual Reality (VR) is increasingly used in spatial cognition research, as it offers high experimental control in naturalistic multimodal environments, which is hard to achieve in real-world settings. Although recent technological advances offer a high level of photorealism, locomotion in VR is still restricted because people might not perceive their self-motion as they would in the real world. This might be related to the inability to use embodied spatial orientation processes, which support automatic and obligatory updating of our spatial awareness. Previous research has identified the roles reference frames play in retaining spatial orientation. Here, we propose using visually overlaid rectangular boxes, simulating reference frames in VR, to provide users with a better insight into spatial direction in landmark-free virtual environments. The current mixed-method study investigated how different variations of the visually simulated reference frames might support people in a navigational search task. Performance results showed that the existence of a simulated reference frame yields significant effects on participants completion time and travel distance. 
Though a simulated CAVE translating with the navigator (one of the simulated reference frames) did not provide significant benefits, the simulated room (another simulated reference frame depicting a rest frame) significantly boosted user performance in the task as well as improved participants preference in the post-experiment evaluation. Results suggest that adding a visually simulated reference frame to VR applications might be a cost-effective solution to the spatial disorientation problem in VR.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Virtual Reality (VR) is increasingly used in spatial cognition research, as it offers high experimental control in naturalistic multimodal environments, which is hard to achieve in real-world settings. Although recent technological advances offer a high level of photorealism, locomotion in VR is still restricted because people might not perceive their self-motion as they would in the real world. This might be related to the inability to use embodied spatial orientation processes, which support automatic and obligatory updating of our spatial awareness. Previous research has identified the roles reference frames play in retaining spatial orientation. Here, we propose using visually overlaid rectangular boxes, simulating reference frames in VR, to provide users with a better insight into spatial direction in landmark-free virtual environments. The current mixed-method study investigated how different variations of the visually simulated reference frames might support people in a navigational search task. Performance results showed that the existence of a simulated reference frame yields significant effects on participants completion time and travel distance. 
Though a simulated CAVE translating with the navigator (one of the simulated reference frames) did not provide significant benefits, the simulated room (another simulated reference frame depicting a rest frame) significantly boosted user performance in the task as well as improved participants preference in the post-experiment evaluation. Results suggest that adding a visually simulated reference frame to VR applications might be a cost-effective solution to the spatial disorientation problem in VR.", "fno": "08446383", "keywords": [ "Cognition", "Human Computer Interaction", "Virtual Reality", "Spatial Direction", "Landmark Free Virtual Environments", "Visually Simulated Reference Frame", "Cost Effective Solution", "Spatial Disorientation Problem", "Spatial Cognition Research", "Naturalistic Multimodal Environments", "Embodied Spatial Orientation Processes", "Spatial Orientation", "Virtual Reality", "CAVE Translating", "Task Analysis", "Navigation", "Visualization", "Virtual Environments", "Resists", "Legged Locomotion", "Cognition", "Human Centered Computing Empirical Studies In HCI" ], "authors": [ { "affiliation": "Simon Fraser University, School of Interactive Arts + Technology, BC, Canada", "fullName": "Thinh Nguyen-Vo", "givenName": "Thinh", "surname": "Nguyen-Vo", "__typename": "ArticleAuthorType" }, { "affiliation": "Simon Fraser University, School of Interactive Arts + Technology, BC, Canada", "fullName": "Bernhard E. 
Riecke", "givenName": "Bernhard E.", "surname": "Riecke", "__typename": "ArticleAuthorType" }, { "affiliation": "Simon Fraser University, School of Interactive Arts + Technology, BC, Canada", "fullName": "Wolfgang Stuerzlinger", "givenName": "Wolfgang", "surname": "Stuerzlinger", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-03-01T00:00:00", "pubType": "proceedings", "pages": "415-422", "year": "2018", "issn": null, "isbn": "978-1-5386-3365-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08446300", "articleId": "13bd1fdV4lD", "__typename": "AdjacentArticleType" }, "next": { "fno": "08447560", "articleId": "13bd1rsER1H", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/3dui/2017/6716/0/07893344", "title": "Moving in a box: Improving spatial orientation in virtual reality using simulated reference frames", "doi": null, "abstractUrl": "/proceedings-article/3dui/2017/07893344/12OmNz5s0Sq", "parentPublication": { "id": "proceedings/3dui/2017/6716/0", "title": "2017 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446300", "title": "Human Compensation Strategies for Orientation Drifts", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446300/13bd1fdV4lD", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sive/2018/5713/0/08577177", "title": "Influence of hearing your steps and environmental sounds in VR while walking", "doi": null, "abstractUrl": 
"/proceedings-article/sive/2018/08577177/17D45XoXP3w", "parentPublication": { "id": "proceedings/sive/2018/5713/0", "title": "2018 IEEE 4th VR Workshop on Sonic Interactions for Virtual Environments (SIVE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2022/9617/0/961700a222", "title": "Design and Evaluation of Travel and Orientation Techniques for Desk VR", "doi": null, "abstractUrl": "/proceedings-article/vr/2022/961700a222/1CJc05Lu2LS", "parentPublication": { "id": "proceedings/vr/2022/9617/0", "title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a596", "title": "VR Wayfinding Training for People with Visual Impairment using VR Treadmill and VR Tracker", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a596/1CJf4aHcqoU", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797999", "title": "VR system to simulate tightrope walking with a standalone VR headset and slack rails", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797999/1cJ0Nqr10CA", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798017", "title": "Simulated Reference Frame Effects on Steering, Jumping and Sliding", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798017/1cJ0YUTkHao", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798025", "title": "Occlusion Management in VR: A Comparative Study", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798025/1cJ1f6V69wY", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090608", "title": "Towards an Affordance of Embodied Locomotion Interfaces in VR: How to Know How to Move?", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090608/1jIxnjPP9Ti", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2020/0497/0/049700a093", "title": "The Action Consistency of Casting in Virtual Environment", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2020/049700a093/1vg7TCY0eqY", "parentPublication": { "id": "proceedings/icvrv/2020/0497/0", "title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "19F1LC52tjO", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "1810084", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "19F1OrW6KxW", "doi": "10.1109/ISMAR-Adjunct.2018.00041", "title": "Effect of Navigation Speed and VR Devices on Cybersickness", "normalizedTitle": "Effect of Navigation Speed and VR Devices on Cybersickness", "abstract": "“Cybertravel” in virtual reality (VR) system can easily provoke cyber-sickness as there are no vestibular cues available during visual optic flow. This study examined the effect of navigation speed as well as the use of different VR devices for navigation on cybersickness. Participants experience street navigation while they are standing still. Four conditions: CAVE (cave automatic virtual environment) and HMD (head-mounted display) x 10 m/s and 24 m/s, were tested while participants perform a counting task. Results showed that higher navigation speed leads to increase in ratings of severity of cybersickness measured by simulator sickness questionnaire (SSQ) and miserable score (MISC). A difference in cybersickness ratings between VR devices is also observed with experimental order effect.", "abstracts": [ { "abstractType": "Regular", "content": "“Cybertravel” in virtual reality (VR) system can easily provoke cyber-sickness as there are no vestibular cues available during visual optic flow. This study examined the effect of navigation speed as well as the use of different VR devices for navigation on cybersickness. Participants experience street navigation while they are standing still. Four conditions: CAVE (cave automatic virtual environment) and HMD (head-mounted display) x 10 m/s and 24 m/s, were tested while participants perform a counting task. 
Results showed that higher navigation speed leads to increase in ratings of severity of cybersickness measured by simulator sickness questionnaire (SSQ) and miserable score (MISC). A difference in cybersickness ratings between VR devices is also observed with experimental order effect.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "“Cybertravel” in virtual reality (VR) system can easily provoke cyber-sickness as there are no vestibular cues available during visual optic flow. This study examined the effect of navigation speed as well as the use of different VR devices for navigation on cybersickness. Participants experience street navigation while they are standing still. Four conditions: CAVE (cave automatic virtual environment) and HMD (head-mounted display) x 10 m/s and 24 m/s, were tested while participants perform a counting task. Results showed that higher navigation speed leads to increase in ratings of severity of cybersickness measured by simulator sickness questionnaire (SSQ) and miserable score (MISC). A difference in cybersickness ratings between VR devices is also observed with experimental order effect.", "fno": "08699319", "keywords": [ "Human Computer Interaction", "Virtual Reality", "Head Mounted Display", "Cybersickness Ratings", "Virtual Reality System", "Vestibular Cues", "Visual Optic Flow", "Street Navigation", "Cave Automatic Virtual Environment", "Navigation Speed", "Cybertravel", "Navigation", "Resists", "Visualization", "Virtual Environments", "Task Analysis", "Optical Sensors", "Motion Sickness", "VIMS", "CAVE", "Imse CAVE", "HMD", "Human Centered Computing X 2014 Human Computer Interaction HCI X 2014 Interaction Paradigms X 2014 Virtual Reality", "Computing Methodologies X 2014 Computer Graphics X 2014 Graphics Systems And Interfaces X 2014 Perception" ], "authors": [ { "affiliation": "The University of Hong Kong", "fullName": "Kristie K. K. Kwok", "givenName": "Kristie K. 
K.", "surname": "Kwok", "__typename": "ArticleAuthorType" }, { "affiliation": "The University of Hong Kong", "fullName": "Adrian K. T. Ng", "givenName": "Adrian K. T.", "surname": "Ng", "__typename": "ArticleAuthorType" }, { "affiliation": "The University of Hong Kong", "fullName": "Henry Y. K. Lau", "givenName": "Henry Y. K.", "surname": "Lau", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-10-01T00:00:00", "pubType": "proceedings", "pages": "91-92", "year": "2018", "issn": null, "isbn": "978-1-5386-7592-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08699299", "articleId": "19F1MCTMB32", "__typename": "AdjacentArticleType" }, "next": { "fno": "08699249", "articleId": "19F1RlY3coU", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/3dui/2015/6886/0/07131742", "title": "Methods to reduce cybersickness and enhance presence for in-place navigation techniques", "doi": null, "abstractUrl": "/proceedings-article/3dui/2015/07131742/12OmNyxFKaM", "parentPublication": { "id": "proceedings/3dui/2015/6886/0", "title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446192", "title": "Using Cybersickness Indicators to Adapt Navigation in Virtual Reality: A Pre-Study", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446192/13bd1eSlyt4", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09737429", "title": "Intentional Head-Motion Assisted Locomotion for Reducing 
Cybersickness", "doi": null, "abstractUrl": "/journal/tg/5555/01/09737429/1BQidPzNjBS", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2022/9617/0/961700a428", "title": "You’re in for a Bumpy Ride! Uneven Terrain Increases Cybersickness While Navigating with Head Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/vr/2022/961700a428/1CJbKYSq2Vq", "parentPublication": { "id": "proceedings/vr/2022/9617/0", "title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a682", "title": "Geometric simplification for reducing optic flow in VR", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a682/1J7WqYsXIuA", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797781", "title": "Effect of Sensory Conflict and Postural Instability on Cybersickness", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797781/1cJ0Sg2UoQE", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797728", "title": "Towards an Immersive Driving Simulator to Study Factors Related to Cybersickness", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797728/1cJ110fSqvm", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces 
(VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089513", "title": "Comparative Evaluation of the Effects of Motion Control on Cybersickness in Immersive Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089513/1jIx7SE9LiM", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089437", "title": "The Effect of a Foveated Field-of-view Restrictor on VR Sickness", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089437/1jIxcfT0Wt2", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a373", "title": "Using Fuzzy Logic to Involve Individual Differences for Predicting Cybersickness during VR Navigation", "doi": null, "abstractUrl": "/proceedings-article/vr/2021/255600a373/1tuAPQPWR2g", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "19F1LC52tjO", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "1810084", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "19F1SrRS4vK", "doi": "10.1109/ISMAR-Adjunct.2018.00043", "title": "Effective Free Field of View Scene Exploration in VR and AR", "normalizedTitle": "Effective Free Field of View Scene Exploration in VR and AR", "abstract": "We propose to improve virtual reality (VR) and optical see-through augmented reality (AR) head-mounted display scene exploration efficiency by allowing the user to adapt the field of view interactively. This way the user can zoom in to examine parts of the scene in more detail without having to translate the viewpoint forward, as would be required in conventional fixed field of view scene exploration. The user can also zoom out, to gain a more comprehensive view of the scene and to examine distant parts of the scene in parallel, without the need to translate the viewpoint backward. Zooming in is supported with a focus+context visualization approach that integrates a distortion-free magnified focus region seamlessly into context. For AR, the higher resolution focus region is resampled from the video feed acquired by a head-mounted high-resolution camera. We demonstrate the benefits of our free field of view scene exploration in the context of VR and AR tasks, where it brings a substantial reduction of viewpoint translation, view direction rotation, and task completion time.", "abstracts": [ { "abstractType": "Regular", "content": "We propose to improve virtual reality (VR) and optical see-through augmented reality (AR) head-mounted display scene exploration efficiency by allowing the user to adapt the field of view interactively. 
This way the user can zoom in to examine parts of the scene in more detail without having to translate the viewpoint forward, as would be required in conventional fixed field of view scene exploration. The user can also zoom out, to gain a more comprehensive view of the scene and to examine distant parts of the scene in parallel, without the need to translate the viewpoint backward. Zooming in is supported with a focus+context visualization approach that integrates a distortion-free magnified focus region seamlessly into context. For AR, the higher resolution focus region is resampled from the video feed acquired by a head-mounted high-resolution camera. We demonstrate the benefits of our free field of view scene exploration in the context of VR and AR tasks, where it brings a substantial reduction of viewpoint translation, view direction rotation, and task completion time.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose to improve virtual reality (VR) and optical see-through augmented reality (AR) head-mounted display scene exploration efficiency by allowing the user to adapt the field of view interactively. This way the user can zoom in to examine parts of the scene in more detail without having to translate the viewpoint forward, as would be required in conventional fixed field of view scene exploration. The user can also zoom out, to gain a more comprehensive view of the scene and to examine distant parts of the scene in parallel, without the need to translate the viewpoint backward. Zooming in is supported with a focus+context visualization approach that integrates a distortion-free magnified focus region seamlessly into context. For AR, the higher resolution focus region is resampled from the video feed acquired by a head-mounted high-resolution camera. 
We demonstrate the benefits of our free field of view scene exploration in the context of VR and AR tasks, where it brings a substantial reduction of viewpoint translation, view direction rotation, and task completion time.", "fno": "08699200", "keywords": [ "Augmented Reality", "Data Visualisation", "Helmet Mounted Displays", "Image Resolution", "Lenses", "View Direction Rotation", "Head Mounted High Resolution Camera", "Distortion Free Magnified Focus Region", "Comprehensive View", "Conventional Fixed Field", "Augmented Reality Head Mounted Display Scene Exploration Efficiency", "VR", "View Scene Exploration", "Effective Free Field", "Visualization", "Cameras", "Resists", "Distortion", "Augmented Reality", "Legged Locomotion", "Augmented Reality", "Virtual Reality", "Navigation", "Field Of View" ], "authors": [ { "affiliation": "State Key Laboratory of Virtual Reality Technology and Systems, Beihang University, Beijing, China", "fullName": "Lili Wang", "givenName": "Lili", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "State Key Laboratory of Virtual Reality Technology and Systems, Beihang University, Beijing, China", "fullName": "Antong Cao", "givenName": "Antong", "surname": "Cao", "__typename": "ArticleAuthorType" }, { "affiliation": "State Key Laboratory of Virtual Reality Technology and Systems, Beihang University, Beijing, China", "fullName": "Zhichao Li", "givenName": "Zhichao", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": "State Key Laboratory of Virtual Reality Technology and Systems, Beihang University, Beijing, China", "fullName": "Xuefeng Yang", "givenName": "Xuefeng", "surname": "Yang", "__typename": "ArticleAuthorType" }, { "affiliation": "Computer Science, Purdue University, West Lafayette, US", "fullName": "Voicu Popescu", "givenName": "Voicu", "surname": "Popescu", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, 
"showBuyMe": true, "hasPdf": true, "pubDate": "2018-10-01T00:00:00", "pubType": "proceedings", "pages": "97-102", "year": "2018", "issn": null, "isbn": "978-1-5386-7592-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08699249", "articleId": "19F1RlY3coU", "__typename": "AdjacentArticleType" }, "next": { "fno": "08699286", "articleId": "19F1VntaVYQ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iiai-aai/2017/0621/0/0621a539", "title": "Enhancing AR-based Science Exploration through Learning Cycle", "doi": null, "abstractUrl": "/proceedings-article/iiai-aai/2017/0621a539/12OmNAYGlF1", "parentPublication": { "id": "proceedings/iiai-aai/2017/0621/0", "title": "2017 6th IIAI International Congress on Advanced Applied Informatics (IIAI-AAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icat/2013/11/0/06728913", "title": "AR replay in a small workspace", "doi": null, "abstractUrl": "/proceedings-article/icat/2013/06728913/12OmNyUFfRZ", "parentPublication": { "id": "proceedings/icat/2013/11/0", "title": "2013 23rd International Conference on Artificial Reality and Telexistence (ICAT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446560", "title": "An AR-Guided System for Fast Image-Based Modeling of Indoor Scenes", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446560/13bd1eOELLz", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/12/08123949", "title": "Efficient VR and AR Navigation Through Multiperspective Occlusion Management", "doi": null, "abstractUrl": 
"/journal/tg/2018/12/08123949/14H4WNoi7Yc", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08642365", "title": "VR Exploration Assistance through Automatic Occlusion Removal", "doi": null, "abstractUrl": "/journal/tg/2019/05/08642365/17PYEj2mz9Y", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798331", "title": "Robust High-Level Video Stabilization for Effective AR Telementoring", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798331/1cJ0RHFst8c", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798025", "title": "Occlusion Management in VR: A Comparative Study", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798025/1cJ1f6V69wY", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a081", "title": "SafeAR: AR Alert System Assisting Obstacle Avoidance for Pedestrians", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a081/1gysiL9OKUo", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2020/7675/0/767500a249", "title": 
"Retargetable AR: Context-aware Augmented Reality in Indoor Scenes based on 3D Scene Graph", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a249/1pBMjRRAvtK", "parentPublication": { "id": "proceedings/ismar-adjunct/2020/7675/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/09/09332290", "title": "Quantifiable Fine-Grain Occlusion Removal Assistance for Efficient VR Exploration", "doi": null, "abstractUrl": "/journal/tg/2022/09/09332290/1qzsRxXpW4o", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cI6akLvAuQ", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cJ0JISlXDG", "doi": "10.1109/VR.2019.8797777", "title": "Exploration of Large Omnidirectional Images in Immersive Environments", "normalizedTitle": "Exploration of Large Omnidirectional Images in Immersive Environments", "abstract": "Navigation is a major challenge in exploring data within immersive environments, especially of large omnidirectional spherical images. We propose a method of auto-scaling to allow users to navigate using teleportation within the safe boundary of their physical environment with different levels of focus. Our method combines physical navigation with virtual teleportation. We also propose a “peek then warp” behavior when using a zoom lens and evaluate our system in conjunction with different teleportation transitions, including a proposed transition for exploration of omnidirectional and 360-degree panoramic imagery, termed Envelop, wherein the destination view expands out from the zoom lens to completely envelop the user. In this work, we focus on visualizing and navigating large omnidirectional or panoramic images with application to GIS visualization as an inside-out omnidirectional image of the earth. We conducted two user studies to evaluate our techniques over a search and comparison task. Our results illustrate the advantages of our techniques for navigation and exploration of omnidirectional images in an immersive environment.", "abstracts": [ { "abstractType": "Regular", "content": "Navigation is a major challenge in exploring data within immersive environments, especially of large omnidirectional spherical images. 
We propose a method of auto-scaling to allow users to navigate using teleportation within the safe boundary of their physical environment with different levels of focus. Our method combines physical navigation with virtual teleportation. We also propose a “peek then warp” behavior when using a zoom lens and evaluate our system in conjunction with different teleportation transitions, including a proposed transition for exploration of omnidirectional and 360-degree panoramic imagery, termed Envelop, wherein the destination view expands out from the zoom lens to completely envelop the user. In this work, we focus on visualizing and navigating large omnidirectional or panoramic images with application to GIS visualization as an inside-out omnidirectional image of the earth. We conducted two user studies to evaluate our techniques over a search and comparison task. Our results illustrate the advantages of our techniques for navigation and exploration of omnidirectional images in an immersive environment.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Navigation is a major challenge in exploring data within immersive environments, especially of large omnidirectional spherical images. We propose a method of auto-scaling to allow users to navigate using teleportation within the safe boundary of their physical environment with different levels of focus. Our method combines physical navigation with virtual teleportation. We also propose a “peek then warp” behavior when using a zoom lens and evaluate our system in conjunction with different teleportation transitions, including a proposed transition for exploration of omnidirectional and 360-degree panoramic imagery, termed Envelop, wherein the destination view expands out from the zoom lens to completely envelop the user. In this work, we focus on visualizing and navigating large omnidirectional or panoramic images with application to GIS visualization as an inside-out omnidirectional image of the earth. 
We conducted two user studies to evaluate our techniques over a search and comparison task. Our results illustrate the advantages of our techniques for navigation and exploration of omnidirectional images in an immersive environment.", "fno": "08797777", "keywords": [ "Data Visualisation", "Geographic Information Systems", "Geophysical Image Processing", "Teleportation", "Virtual Reality", "Immersive Environment", "Omnidirectional Spherical Images", "Auto Scaling", "Physical Environment", "Physical Navigation", "Virtual Teleportation", "Peek Then Warp Behavior", "Zoom Lens", "360 Degree Panoramic Imagery", "Panoramic Images", "Teleportation Transitions", "Navigation", "Teleportation", "Cameras", "Data Visualization", "Lenses", "Earth", "Virtual Environments", "Human Centered Computing X 2014 Human Computer Interaction HCI X 2014 HCI Design And Evaluation Methods X 2014 User Studies", "Human Centered Computing X 2014 Human Computer Interaction HCI X 2014 Interaction Paradigms X 2014 Virtual Reality", "Human Centered Computing X 2014 Visualization X 2014 Visualization Application Domains X 2014 Geographic Visualization" ], "authors": [ { "affiliation": "Stony Brook University, NY, USA", "fullName": "Seyedkoosha Mirhosseini", "givenName": "Seyedkoosha", "surname": "Mirhosseini", "__typename": "ArticleAuthorType" }, { "affiliation": "Stony Brook University, NY, USA", "fullName": "Parmida Ghahremani", "givenName": "Parmida", "surname": "Ghahremani", "__typename": "ArticleAuthorType" }, { "affiliation": "Stony Brook Univ., Stony Brook, NY, USA", "fullName": "Sushant Ojal", "givenName": "Sushant", "surname": "Ojal", "__typename": "ArticleAuthorType" }, { "affiliation": "Stony Brook University, NY, USA", "fullName": "Joseph Marino", "givenName": "Joseph", "surname": "Marino", "__typename": "ArticleAuthorType" }, { "affiliation": "Stony Brook Univ., Stony Brook, NY, USA", "fullName": "Arie Kaufman", "givenName": "Arie", "surname": "Kaufman", "__typename": 
"ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-03-01T00:00:00", "pubType": "proceedings", "pages": "413-422", "year": "2019", "issn": null, "isbn": "978-1-7281-1377-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08798016", "articleId": "1cJ14UH16nK", "__typename": "AdjacentArticleType" }, "next": { "fno": "08797843", "articleId": "1cJ0QjNw1u8", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icmtma/2009/3583/1/3583a502", "title": "Real-Time FPGA-Based Panoramic Unrolling of High-Resolution Catadioptric Omnidirectional Images", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2009/3583a502/12OmNBeRtRp", "parentPublication": { "id": "proceedings/icmtma/2009/3583/3", "title": "2009 International Conference on Measuring Technology and Mechatronics Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2014/3624/0/06798852", "title": "Reorientation in virtual environments using interactive portals", "doi": null, "abstractUrl": "/proceedings-article/3dui/2014/06798852/12OmNqBbHVR", "parentPublication": { "id": "proceedings/3dui/2014/3624/0", "title": "2014 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2013/6097/0/06550203", "title": "Navigating in virtual environments with 360° omnidirectional rendering", "doi": null, "abstractUrl": "/proceedings-article/3dui/2013/06550203/12OmNvzJG8K", "parentPublication": { "id": "proceedings/3dui/2013/6097/0", "title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446620", "title": 
"Spatial Updating and Simulator Sickness During Steering and Jumping in Immersive Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446620/13bd1fKQxs4", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a556", "title": "Group WiM: A Group Navigation Technique for Collaborative Virtual Reality Environments", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a556/1CJdXqzjctO", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10049698", "title": "Gaining the High Ground: Teleportation to Mid-Air Targets in Immersive Virtual Environments", "doi": null, "abstractUrl": "/journal/tg/2023/05/10049698/1KYotugT0xW", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2022/5725/0/572500a082", "title": "WiM-Based Group Navigation for Collaborative Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/aivr/2022/572500a082/1KmFfzv6fWo", "parentPublication": { "id": "proceedings/aivr/2022/5725/0", "title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09382870", "title": "Group Navigation for Guided Tours in Distributed Virtual Environments", "doi": null, "abstractUrl": "/journal/tg/2021/05/09382870/1saZCxsOG9q", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on 
Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a278", "title": "In Touch with Everyday Objects: Teleportation Techniques in Virtual Environments Supporting Tangibility", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a278/1tnXjaZXiw0", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1pystLSz19C", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1pysyoGwmze", "doi": "10.1109/ISMAR50242.2020.00079", "title": "View Splicing for Effective VR Collaboration", "normalizedTitle": "View Splicing for Effective VR Collaboration", "abstract": "In a co-located multi-user collaborative virtual reality (VR) application a collaborator should be able to indicate a workspace location to the user, such that they can refer to it simultaneously as they work together. Due to their different viewpoints, the collaborator sees some parts of the virtual environment (VE) that the user does not, and communication breaks down when the collaborator's reference is not visible to the user. The conventional solutions of asking the user to move around to gain line of sight to the collaborator's reference, or of asking the user to toggle back and forth between their view and that of the collaborator can be inefficient and ineffective. This paper proposes a method for improving collaboration in VR by alleviating the disparity between the user and the collaborator views of the VE. The user is shown a multiperspective visualization of the VE that transitions smoothly from the user to the collaborator's perspective. The multiperpsective visualization is based on the switch camera, a novel camera model with curved rays that splice together the user and collaborator views. The multiperspective visualization is computed first by warping the VE geometry, through projection with the switch camera followed by unprojection with a conventional camera, and then by rendering the warped VE conventionally, for each user eye. 
A controlled user study with three tasks shows that VR collaboration using the switch camera multiperpsective visualization is faster, more reliable, and less taxing on the user than the conventional approaches of viewpoint translation or view toggling.", "abstracts": [ { "abstractType": "Regular", "content": "In a co-located multi-user collaborative virtual reality (VR) application a collaborator should be able to indicate a workspace location to the user, such that they can refer to it simultaneously as they work together. Due to their different viewpoints, the collaborator sees some parts of the virtual environment (VE) that the user does not, and communication breaks down when the collaborator's reference is not visible to the user. The conventional solutions of asking the user to move around to gain line of sight to the collaborator's reference, or of asking the user to toggle back and forth between their view and that of the collaborator can be inefficient and ineffective. This paper proposes a method for improving collaboration in VR by alleviating the disparity between the user and the collaborator views of the VE. The user is shown a multiperspective visualization of the VE that transitions smoothly from the user to the collaborator's perspective. The multiperpsective visualization is based on the switch camera, a novel camera model with curved rays that splice together the user and collaborator views. The multiperspective visualization is computed first by warping the VE geometry, through projection with the switch camera followed by unprojection with a conventional camera, and then by rendering the warped VE conventionally, for each user eye. 
A controlled user study with three tasks shows that VR collaboration using the switch camera multiperpsective visualization is faster, more reliable, and less taxing on the user than the conventional approaches of viewpoint translation or view toggling.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In a co-located multi-user collaborative virtual reality (VR) application a collaborator should be able to indicate a workspace location to the user, such that they can refer to it simultaneously as they work together. Due to their different viewpoints, the collaborator sees some parts of the virtual environment (VE) that the user does not, and communication breaks down when the collaborator's reference is not visible to the user. The conventional solutions of asking the user to move around to gain line of sight to the collaborator's reference, or of asking the user to toggle back and forth between their view and that of the collaborator can be inefficient and ineffective. This paper proposes a method for improving collaboration in VR by alleviating the disparity between the user and the collaborator views of the VE. The user is shown a multiperspective visualization of the VE that transitions smoothly from the user to the collaborator's perspective. The multiperpsective visualization is based on the switch camera, a novel camera model with curved rays that splice together the user and collaborator views. The multiperspective visualization is computed first by warping the VE geometry, through projection with the switch camera followed by unprojection with a conventional camera, and then by rendering the warped VE conventionally, for each user eye. 
A controlled user study with three tasks shows that VR collaboration using the switch camera multiperpsective visualization is faster, more reliable, and less taxing on the user than the conventional approaches of viewpoint translation or view toggling.", "fno": "850800a509", "keywords": [ "Computational Geometry", "Data Visualisation", "Groupware", "Ray Tracing", "Rendering Computer Graphics", "Virtual Reality", "Multiperspective Visualization", "VR Collaboration", "Multiuser Collaborative Virtual Reality Application", "Switch Camera", "Curved Rays", "VE Geometry Warping", "Rendering", "View Splicing", "Visualization", "Splicing", "Collaboration", "Virtual Environments", "Switches", "Cameras", "Task Analysis", "Virtual Reality", "Collaborative", "Occlusion Management", "Multiperspective Visualization" ], "authors": [ { "affiliation": "Beihang University,State Key Laboratory of Virtual Reality Technology and Systems,Beijing,China", "fullName": "Lili Wang", "givenName": "Lili", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "Beihang University,State Key Laboratory of Virtual Reality Technology and Systems,Beijing,China", "fullName": "Wentao Wu", "givenName": "Wentao", "surname": "Wu", "__typename": "ArticleAuthorType" }, { "affiliation": "Beihang University,State Key Laboratory of Virtual Reality Technology and Systems,Beijing,China", "fullName": "Zijing Zhou", "givenName": "Zijing", "surname": "Zhou", "__typename": "ArticleAuthorType" }, { "affiliation": "Purdue University,West Lafayette,Indiana,U.S", "fullName": "Voicu Popescu", "givenName": "Voicu", "surname": "Popescu", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-11-01T00:00:00", "pubType": "proceedings", "pages": "509-519", "year": "2020", "issn": "1554-7868", "isbn": "978-1-7281-8508-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, 
"webExtras": [], "adjacentArticles": { "previous": { "fno": "850800a498", "articleId": "1pyswTqrkZ2", "__typename": "AdjacentArticleType" }, "next": { "fno": "850800a520", "articleId": "1pysxMcaE2Q", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ieee-vis/1999/5897/0/58970039", "title": "A Framework for Assisted Exploration with Collaboration", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1999/58970039/12OmNBTs7Hn", "parentPublication": { "id": "proceedings/ieee-vis/1999/5897/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2016/0842/0/07460053", "title": "Combating VR sickness through subtle dynamic field-of-view modification", "doi": null, "abstractUrl": "/proceedings-article/3dui/2016/07460053/12OmNBubORd", "parentPublication": { "id": "proceedings/3dui/2016/0842/0", "title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892373", "title": "Application of redirected walking in room-scale VR", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892373/12OmNxG1ySA", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2014/6184/0/06948464", "title": "[Poster] View independence in remote collaboration using AR", "doi": null, "abstractUrl": "/proceedings-article/ismar/2014/06948464/12OmNzTH0Rn", "parentPublication": { "id": "proceedings/ismar/2014/6184/0", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/04/07833190", "title": "Bending the Curve: Sensitivity 
to Bending of Curved Paths and Application in Room-Scale VR", "doi": null, "abstractUrl": "/journal/tg/2017/04/07833190/13rRUIIVlcQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699200", "title": "Effective Free Field of View Scene Exploration in VR and AR", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699200/19F1SrRS4vK", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090535", "title": "Usability of a Foreign Body Object Scenario in VR for Nursing Education", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090535/1jIxzfEJiSI", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2020/8014/0/801400a146", "title": "Representing Real-Time Multi-User Collaboration in Visualizations", "doi": null, "abstractUrl": "/proceedings-article/vis/2020/801400a146/1qRNXFy3oac", "parentPublication": { "id": "proceedings/vis/2020/8014/0", "title": "2020 IEEE Visualization Conference (VIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/09/09332290", "title": "Quantifiable Fine-Grain Occlusion Removal Assistance for Efficient VR Exploration", "doi": null, "abstractUrl": "/journal/tg/2022/09/09332290/1qzsRxXpW4o", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" 
}, "__typename": "RecommendedArticleType" }, { "id": "trans/nt/2021/06/09503101", "title": "Combined Stateful Classification and Session Splicing for High-Speed NFV Service Chaining", "doi": null, "abstractUrl": "/journal/nt/2021/06/09503101/1vJVslsXbJC", "parentPublication": { "id": "trans/nt", "title": "IEEE/ACM Transactions on Networking", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1tuAeQeDJja", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1tuBtAXtpYc", "doi": "10.1109/VR50410.2021.00043", "title": "Disocclusion Headlight for Selection Assistance in VR", "normalizedTitle": "Disocclusion Headlight for Selection Assistance in VR", "abstract": "We introduce the disocclusion headlight, a method for VR selection assistance based on alleviating occlusions at the center of the user's field of view. The user's visualization of the VE is modified to reduce overlap between objects. This way, selection candidate objects have larger image footprints, which facilitates selection. The modification is confined to the center of the frame, with continuity to the periphery of the frame which is rendered conventionally. The selection assistance is provided automatically, without any interaction from the user. Furthermore, our method disoccludes without destroying the local spatial relationships between selection candidates, which allows solving complex selection queries based on the relative position of objects. We have tested our method on three selection tasks, where we compared it to two state-of-the-art VR selection techniques, i.e., the alpha cursor and the flower cone. Our method showed significant advantages in terms of shorter task completion times, and of fewer selection errors.", "abstracts": [ { "abstractType": "Regular", "content": "We introduce the disocclusion headlight, a method for VR selection assistance based on alleviating occlusions at the center of the user's field of view. The user's visualization of the VE is modified to reduce overlap between objects. This way, selection candidate objects have larger image footprints, which facilitates selection. 
The modification is confined to the center of the frame, with continuity to the periphery of the frame which is rendered conventionally. The selection assistance is provided automatically, without any interaction from the user. Furthermore, our method disoccludes without destroying the local spatial relationships between selection candidates, which allows solving complex selection queries based on the relative position of objects. We have tested our method on three selection tasks, where we compared it to two state-of-the-art VR selection techniques, i.e., the alpha cursor and the flower cone. Our method showed significant advantages in terms of shorter task completion times, and of fewer selection errors.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We introduce the disocclusion headlight, a method for VR selection assistance based on alleviating occlusions at the center of the user's field of view. The user's visualization of the VE is modified to reduce overlap between objects. This way, selection candidate objects have larger image footprints, which facilitates selection. The modification is confined to the center of the frame, with continuity to the periphery of the frame which is rendered conventionally. The selection assistance is provided automatically, without any interaction from the user. Furthermore, our method disoccludes without destroying the local spatial relationships between selection candidates, which allows solving complex selection queries based on the relative position of objects. We have tested our method on three selection tasks, where we compared it to two state-of-the-art VR selection techniques, i.e., the alpha cursor and the flower cone. 
Our method showed significant advantages in terms of shorter task completion times, and of fewer selection errors.", "fno": "255600a216", "keywords": [ "Query Processing", "Rendering Computer Graphics", "Virtual Reality", "Larger Image Footprints", "Method Disoccludes", "Selection Candidates", "Complex Selection", "Selection Tasks", "State Of The Art VR Selection Techniques", "Fewer Selection Errors", "Disocclusion Headlight", "VR Selection Assistance", "Alleviating Occlusions", "Selection Candidate Objects", "Visualization", "Three Dimensional Displays", "Virtual Reality", "User Interfaces", "Rendering Computer Graphics", "Task Analysis", "Virtual Reality Pointing And Selection Disocclusion Multiperspective Rendering" ], "authors": [ { "affiliation": "Beihang University, Peng Cheng Laboratory", "fullName": "Lili Wang", "givenName": "Lili", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "Beihang University", "fullName": "Jianjun Chen", "givenName": "Jianjun", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": "Beihang University", "fullName": "Qixiang Ma", "givenName": "Qixiang", "surname": "Ma", "__typename": "ArticleAuthorType" }, { "affiliation": "Purdue University", "fullName": "Voicu Popescu", "givenName": "Voicu", "surname": "Popescu", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-03-01T00:00:00", "pubType": "proceedings", "pages": "216-225", "year": "2021", "issn": null, "isbn": "978-1-6654-1838-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1tuBsancQbS", "name": "pvr202118380-09417655s1-mm_255600a216.zip", "size": "279 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pvr202118380-09417655s1-mm_255600a216.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "255600a207", "articleId": "1tuB9GkkmY0", 
"__typename": "AdjacentArticleType" }, "next": { "fno": "255600a226", "articleId": "1tuAvyrBEXe", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2022/9617/0/961700a832", "title": "GazeDock: Gaze-Only Menu Selection in Virtual Reality using Auto-Triggering Peripheral Menu", "doi": null, "abstractUrl": "/proceedings-article/vr/2022/961700a832/1CJbR6qnKdW", "parentPublication": { "id": "proceedings/vr/2022/9617/0", "title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a582", "title": "Multi-Touch Smartphone-Based Progressive Refinement VR Selection", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a582/1CJcBfmyX5K", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a247", "title": "From attention to action: Key drivers to augment VR experience for everyday consumer applications", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a247/1CJelwYgfOE", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a460", "title": "Toward Intuitive Acquisition of Occluded VR Objects Through an Interactive Disocclusion Mini-map", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a460/1MNgkshFgXK", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798106", "title": "VR-MOOCs: A Learning Management System for VR Education", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798106/1cJ0Pvi3gwo", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a380", "title": "Evaluating VR Sickness in VR Locomotion Techniques", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a380/1tnXc1raaxq", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a030", "title": "Immersive Multimodal and Procedurally-Assisted Creation of VR Environments", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a030/1tnXheKhk1q", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a759", "title": "Turning a Messy Room into a Fully Immersive VR Playground", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a759/1tnXiK8j7fq", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a235", "title": "Scene-Context-Aware Indoor Object Selection and Movement in VR", "doi": null, "abstractUrl": 
"/proceedings-article/vr/2021/255600a235/1tuAmQvIgWA", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/12/09507320", "title": "OctoPocus in VR: Using a Dynamic Guide for 3D Mid-Air Gestures in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2021/12/09507320/1vNfMheqZ2w", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKirt", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "17D45WHONoj", "doi": "10.1109/CVPR.2018.00533", "title": "V2V-PoseNet: Voxel-to-Voxel Prediction Network for Accurate 3D Hand and Human Pose Estimation from a Single Depth Map", "normalizedTitle": "V2V-PoseNet: Voxel-to-Voxel Prediction Network for Accurate 3D Hand and Human Pose Estimation from a Single Depth Map", "abstract": "Most of the existing deep learning-based methods for 3D hand and human pose estimation from a single depth map are based on a common framework that takes a 2D depth map and directly regresses the 3D coordinates of keypoints, such as hand or human body joints, via 2D convolutional neural networks (CNNs). The first weakness of this approach is the presence of perspective distortion in the 2D depth map. While the depth map is intrinsically 3D data, many previous methods treat depth maps as 2D images that can distort the shape of the actual object through projection from 3D to 2D space. This compels the network to perform perspective distortion-invariant estimation. The second weakness of the conventional approach is that directly regressing 3D coordinates from a 2D image is a highly nonlinear mapping, which causes difficulty in the learning procedure. To overcome these weaknesses, we firstly cast the 3D hand and human pose estimation problem from a single depth map into a voxel-to-voxel prediction that uses a 3D voxelized grid and estimates the per-voxel likelihood for each keypoint. We design our model as a 3D CNN that provides accurate estimates while running in real-time. Our system outperforms previous methods in almost all publicly available 3D hand and human pose estimation datasets and placed first in the HANDS 2017 frame-based 3D hand pose estimation challenge. 
The code is available in1.", "abstracts": [ { "abstractType": "Regular", "content": "Most of the existing deep learning-based methods for 3D hand and human pose estimation from a single depth map are based on a common framework that takes a 2D depth map and directly regresses the 3D coordinates of keypoints, such as hand or human body joints, via 2D convolutional neural networks (CNNs). The first weakness of this approach is the presence of perspective distortion in the 2D depth map. While the depth map is intrinsically 3D data, many previous methods treat depth maps as 2D images that can distort the shape of the actual object through projection from 3D to 2D space. This compels the network to perform perspective distortion-invariant estimation. The second weakness of the conventional approach is that directly regressing 3D coordinates from a 2D image is a highly nonlinear mapping, which causes difficulty in the learning procedure. To overcome these weaknesses, we firstly cast the 3D hand and human pose estimation problem from a single depth map into a voxel-to-voxel prediction that uses a 3D voxelized grid and estimates the per-voxel likelihood for each keypoint. We design our model as a 3D CNN that provides accurate estimates while running in real-time. Our system outperforms previous methods in almost all publicly available 3D hand and human pose estimation datasets and placed first in the HANDS 2017 frame-based 3D hand pose estimation challenge. The code is available in1.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Most of the existing deep learning-based methods for 3D hand and human pose estimation from a single depth map are based on a common framework that takes a 2D depth map and directly regresses the 3D coordinates of keypoints, such as hand or human body joints, via 2D convolutional neural networks (CNNs). The first weakness of this approach is the presence of perspective distortion in the 2D depth map. 
While the depth map is intrinsically 3D data, many previous methods treat depth maps as 2D images that can distort the shape of the actual object through projection from 3D to 2D space. This compels the network to perform perspective distortion-invariant estimation. The second weakness of the conventional approach is that directly regressing 3D coordinates from a 2D image is a highly nonlinear mapping, which causes difficulty in the learning procedure. To overcome these weaknesses, we firstly cast the 3D hand and human pose estimation problem from a single depth map into a voxel-to-voxel prediction that uses a 3D voxelized grid and estimates the per-voxel likelihood for each keypoint. We design our model as a 3D CNN that provides accurate estimates while running in real-time. Our system outperforms previous methods in almost all publicly available 3D hand and human pose estimation datasets and placed first in the HANDS 2017 frame-based 3D hand pose estimation challenge. The code is available in1.", "fno": "642000f079", "keywords": [ "Feature Extraction", "Image Classification", "Image Segmentation", "Learning Artificial Intelligence", "Neural Nets", "Pose Estimation", "3 D CNN", "HANDS 2017 Frame Based 3 D", "V 2 V Pose Net", "Voxel To Voxel Prediction Network", "Human Pose Estimation", "Single Depth Map", "Human Body Joints", "2 D Convolutional Neural Networks", "Intrinsically 3 D Data", "Perspective Distortion Invariant Estimation", "Highly Nonlinear Mapping", "3 D Voxelized Grid", "Deep Learning Based Methods", "Three Dimensional Displays", "Two Dimensional Displays", "Pose Estimation", "Solid Modeling", "Nonlinear Distortion", "Shape" ], "authors": [ { "affiliation": null, "fullName": "Ju Yong Chang", "givenName": "Ju Yong", "surname": "Chang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Gyeongsik Moon", "givenName": "Gyeongsik", "surname": "Moon", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Kyoung Mu 
Lee", "givenName": "Kyoung Mu", "surname": "Lee", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-06-01T00:00:00", "pubType": "proceedings", "pages": "5079-5088", "year": "2018", "issn": null, "isbn": "978-1-5386-6420-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "642000f070", "articleId": "17D45XlyDuS", "__typename": "AdjacentArticleType" }, "next": { "fno": "642000f089", "articleId": "17D45W2Wyzg", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2017/0457/0/0457b196", "title": "Crossing Nets: Combining GANs and VAEs with a Shared Latent Space for Hand Pose Estimation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457b196/12OmNCwlaey", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032e913", "title": "Learning to Estimate 3D Hand Pose from Single RGB Images", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032e913/12OmNwcl7Bw", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2019/04/08338122", "title": "Real-Time 3D Hand Pose Estimation with 3D Convolutional Neural Networks", "doi": null, "abstractUrl": "/journal/tp/2019/04/08338122/13rRUx0xPJX", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/cvpr/2018/6420/0/642000c636", "title": "Depth-Based 3D Hand Pose Estimation: From Current Achievements to Future Goals", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000c636/17D45W2Wyyl", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000f147", "title": "Dense 3D Regression for Hand Pose Estimation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000f147/17D45WaTkeL", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000i417", "title": "Hand PointNet: 3D Hand Pose Estimation Using Point Sets", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000i417/17D45XERmmi", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000a089", "title": "Cross-Modal Deep Variational Hand Pose Estimation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000a089/17D45Xh13pi", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2019/5023/0/502300c866", "title": "3D Hand Pose Estimation from RGB Using Privileged Learning with Depth Data", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2019/502300c866/1i5mvFudr68", "parentPublication": { "id": 
"proceedings/iccvw/2019/5023/0", "title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800h111", "title": "HandVoxNet: Deep Voxel-Based Network for 3D Hand Shape and Pose Estimation From a Single Depth Map", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800h111/1m3nfro8U8g", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/12/09599544", "title": "HandVoxNet++: 3D Hand Shape and Pose Estimation Using Voxel-Based Neural Networks", "doi": null, "abstractUrl": "/journal/tp/2022/12/09599544/1yeC9mCPAty", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1BmEezmpGrm", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "acronym": "iccv", "groupId": "1000149", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1BmJUc8RsvS", "doi": "10.1109/ICCV48922.2021.01130", "title": "Estimating Egocentric 3D Human Pose in Global Space", "normalizedTitle": "Estimating Egocentric 3D Human Pose in Global Space", "abstract": "Egocentric 3D human pose estimation using a single fisheye camera has become popular recently as it allows capturing a wide range of daily activities in unconstrained environments, which is difficult for traditional outside-in motion capture with external cameras. However, existing methods have several limitations. A prominent problem is that the estimated poses lie in the local coordinate system of the fisheye camera, rather than in the world coordinate system, which is restrictive for many applications. Furthermore, these methods suffer from limited accuracy and temporal instability due to ambiguities caused by the monocular setup and the severe occlusion in a strongly distorted egocentric perspective. To tackle these limitations, we present a new method for egocentric global 3D body pose estimation using a single head-mounted fish-eye camera. To achieve accurate and temporally stable global poses, a spatio-temporal optimization is performed over a sequence of frames by minimizing heatmap reprojection errors and enforcing local and global body motion priors learned from a mocap dataset. 
Experimental results show that our approach outperforms state-of-the-art methods both quantitatively and qualitatively.", "abstracts": [ { "abstractType": "Regular", "content": "Egocentric 3D human pose estimation using a single fisheye camera has become popular recently as it allows capturing a wide range of daily activities in unconstrained environments, which is difficult for traditional outside-in motion capture with external cameras. However, existing methods have several limitations. A prominent problem is that the estimated poses lie in the local coordinate system of the fisheye camera, rather than in the world coordinate system, which is restrictive for many applications. Furthermore, these methods suffer from limited accuracy and temporal instability due to ambiguities caused by the monocular setup and the severe occlusion in a strongly distorted egocentric perspective. To tackle these limitations, we present a new method for egocentric global 3D body pose estimation using a single head-mounted fish-eye camera. To achieve accurate and temporally stable global poses, a spatio-temporal optimization is performed over a sequence of frames by minimizing heatmap reprojection errors and enforcing local and global body motion priors learned from a mocap dataset. Experimental results show that our approach outperforms state-of-the-art methods both quantitatively and qualitatively.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Egocentric 3D human pose estimation using a single fisheye camera has become popular recently as it allows capturing a wide range of daily activities in unconstrained environments, which is difficult for traditional outside-in motion capture with external cameras. However, existing methods have several limitations. A prominent problem is that the estimated poses lie in the local coordinate system of the fisheye camera, rather than in the world coordinate system, which is restrictive for many applications. 
Furthermore, these methods suffer from limited accuracy and temporal instability due to ambiguities caused by the monocular setup and the severe occlusion in a strongly distorted egocentric perspective. To tackle these limitations, we present a new method for egocentric global 3D body pose estimation using a single head-mounted fish-eye camera. To achieve accurate and temporally stable global poses, a spatio-temporal optimization is performed over a sequence of frames by minimizing heatmap reprojection errors and enforcing local and global body motion priors learned from a mocap dataset. Experimental results show that our approach outperforms state-of-the-art methods both quantitatively and qualitatively.", "fno": "281200l1480", "keywords": [ "Heating Systems", "Computer Vision", "Three Dimensional Displays", "Uncertainty", "Pose Estimation", "Cameras", "Sensors" ], "authors": [ { "affiliation": "MPI Informatics", "fullName": "Jian Wang", "givenName": "Jian", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "MPI Informatics", "fullName": "Lingjie Liu", "givenName": "Lingjie", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": "Facebook Reality Labs", "fullName": "Weipeng Xu", "givenName": "Weipeng", "surname": "Xu", "__typename": "ArticleAuthorType" }, { "affiliation": "MPI Informatics", "fullName": "Kripasindhu Sarkar", "givenName": "Kripasindhu", "surname": "Sarkar", "__typename": "ArticleAuthorType" }, { "affiliation": "MPI Informatics", "fullName": "Christian Theobalt", "givenName": "Christian", "surname": "Theobalt", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-10-01T00:00:00", "pubType": "proceedings", "pages": "11480-11489", "year": "2021", "issn": null, "isbn": "978-1-6654-2812-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { 
"previous": { "fno": "281200l1468", "articleId": "1BmFJOryIhi", "__typename": "AdjacentArticleType" }, "next": { "fno": "281200l1490", "articleId": "1BmETugUIp2", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/csie/2009/3507/3/3507c081", "title": "An Easy Camera Pose Method from Fisheye Image", "doi": null, "abstractUrl": "/proceedings-article/csie/2009/3507c081/12OmNwdtwfC", "parentPublication": { "id": "proceedings/csie/2009/3507/3", "title": "Computer Science and Information Engineering, World Congress on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457d501", "title": "Seeing Invisible Poses: Estimating 3D Body Pose from Egocentric Video", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457d501/12OmNyk300m", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2015/6964/0/07299061", "title": "First-person pose recognition using egocentric workspaces", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2015/07299061/12OmNzlD9rR", "parentPublication": { "id": "proceedings/cvpr/2015/6964/0", "title": "2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200o4508", "title": "EgoRenderer: Rendering Human Avatars from Egocentric Camera Images", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200o4508/1BmJxzOtk4w", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/cvpr/2022/6946/0/694600n3147", "title": "Estimating Egocentric 3D Human Pose in the Wild with External Weak Supervision", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600n3147/1H1ms5RlwuQ", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798267", "title": "MonoEye: Monocular Fisheye Camera-based 3D Human Pose Estimation", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798267/1cJ0RUiTm8g", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797894", "title": "Generating Synthetic Humans for Learning 3D Pose Estimation", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797894/1cJ0Vo2T0ys", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300h727", "title": "xR-EgoPose: Egocentric 3D Human Pose From an HMD Camera", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300h727/1hQqpGOfz3i", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800j887", "title": "You2Me: Inferring Body Pose in Egocentric Video via First and Second Person Interactions", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800j887/1m3oqO2FWx2", "parentPublication": { "id": 
"proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2021/0477/0/047700b771", "title": "Automatic Calibration of the Fisheye Camera for Egocentric 3D Human Pose Estimation from a Single Image", "doi": null, "abstractUrl": "/proceedings-article/wacv/2021/047700b771/1uqGwG2xnQQ", "parentPublication": { "id": "proceedings/wacv/2021/0477/0", "title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1H1gVMlkl32", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1H1ms5RlwuQ", "doi": "10.1109/CVPR52688.2022.01281", "title": "Estimating Egocentric 3D Human Pose in the Wild with External Weak Supervision", "normalizedTitle": "Estimating Egocentric 3D Human Pose in the Wild with External Weak Supervision", "abstract": "Egocentric 3D human pose estimation with a single fisheye camera has drawn a significant amount of attention recently. However, existing methods struggle with pose estimation from in-the-wild images, because they can only be trained on synthetic data due to the unavailability of large-scale in-the-wild egocentric datasets. Furthermore, these methods easily fail when the body parts are occluded by or interacting with the surrounding scene. To address the shortage of in-the-wild data, we collect a large-scale in-the-wild egocentric dataset called Egocentric Poses in the Wild (EgoPW). This dataset is captured by a head-mounted fisheye camera and an auxiliary external camera, which provides an additional observation of the human body from a third-person perspective during training. We present a new egocentric pose estimation method, which can be trained on the new dataset with weak external supervision. Specifically, we first generate pseudo labels for the EgoPW dataset with a spatio-temporal optimization method by incorporating the external-view supervision. The pseudo labels are then used to train an egocentric pose estimation network. To facilitate the network training, we propose a novel learning strategy to supervise the egocentric features with the high-quality features extracted by a pretrained external-view pose estimation model. 
The experiments show that our method predicts accurate 3D poses from a single in-the-wild egocentric image and outperforms the state-of-the-art methods both quantitatively and qualitatively.", "abstracts": [ { "abstractType": "Regular", "content": "Egocentric 3D human pose estimation with a single fisheye camera has drawn a significant amount of attention recently. However, existing methods struggle with pose estimation from in-the-wild images, because they can only be trained on synthetic data due to the unavailability of large-scale in-the-wild egocentric datasets. Furthermore, these methods easily fail when the body parts are occluded by or interacting with the surrounding scene. To address the shortage of in-the-wild data, we collect a large-scale in-the-wild egocentric dataset called Egocentric Poses in the Wild (EgoPW). This dataset is captured by a head-mounted fisheye camera and an auxiliary external camera, which provides an additional observation of the human body from a third-person perspective during training. We present a new egocentric pose estimation method, which can be trained on the new dataset with weak external supervision. Specifically, we first generate pseudo labels for the EgoPW dataset with a spatio-temporal optimization method by incorporating the external-view supervision. The pseudo labels are then used to train an egocentric pose estimation network. To facilitate the network training, we propose a novel learning strategy to supervise the egocentric features with the high-quality features extracted by a pretrained external-view pose estimation model. The experiments show that our method predicts accurate 3D poses from a single in-the-wild egocentric image and outperforms the state-of-the-art methods both quantitatively and qualitatively.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Egocentric 3D human pose estimation with a single fisheye camera has drawn a significant amount of attention recently. 
However, existing methods struggle with pose estimation from in-the-wild images, because they can only be trained on synthetic data due to the unavailability of large-scale in-the-wild egocentric datasets. Furthermore, these methods easily fail when the body parts are occluded by or interacting with the surrounding scene. To address the shortage of in-the-wild data, we collect a large-scale in-the-wild egocentric dataset called Egocentric Poses in the Wild (EgoPW). This dataset is captured by a head-mounted fisheye camera and an auxiliary external camera, which provides an additional observation of the human body from a third-person perspective during training. We present a new egocentric pose estimation method, which can be trained on the new dataset with weak external supervision. Specifically, we first generate pseudo labels for the EgoPW dataset with a spatio-temporal optimization method by incorporating the external-view supervision. The pseudo labels are then used to train an egocentric pose estimation network. To facilitate the network training, we propose a novel learning strategy to supervise the egocentric features with the high-quality features extracted by a pretrained external-view pose estimation model. 
The experiments show that our method predicts accurate 3D poses from a single in-the-wild egocentric image and outperforms the state-of-the-art methods both quantitatively and qualitatively.", "fno": "694600n3147", "keywords": [ "Feature Extraction", "Learning Artificial Intelligence", "Optimisation", "Pose Estimation", "Ego PW Dataset", "Spatio Temporal Optimization Method", "In The Wild Egocentric Image", "Egocentric 3 D Human Pose", "External Weak Supervision", "In The Wild Egocentric Dataset", "Head Mounted Fisheye Camera", "Auxiliary External Camera", "Human Body", "Egocentric Pose Estimation", "External View Pose Estimation", "Egocentric Poses In The Wild", "Training", "Computer Vision", "Three Dimensional Displays", "Pose Estimation", "Optimization Methods", "Cameras", "Feature Extraction" ], "authors": [ { "affiliation": "MPI Informatics", "fullName": "Jian Wang", "givenName": "Jian", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "MPI Informatics", "fullName": "Lingjie Liu", "givenName": "Lingjie", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": "Facebook Reality Labs", "fullName": "Weipeng Xu", "givenName": "Weipeng", "surname": "Xu", "__typename": "ArticleAuthorType" }, { "affiliation": "MPI Informatics", "fullName": "Kripasindhu Sarkar", "givenName": "Kripasindhu", "surname": "Sarkar", "__typename": "ArticleAuthorType" }, { "affiliation": "MPI Informatics", "fullName": "Diogo Luvizon", "givenName": "Diogo", "surname": "Luvizon", "__typename": "ArticleAuthorType" }, { "affiliation": "MPI Informatics", "fullName": "Christian Theobalt", "givenName": "Christian", "surname": "Theobalt", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-06-01T00:00:00", "pubType": "proceedings", "pages": "13147-13156", "year": "2022", "issn": null, "isbn": "978-1-6654-6946-3", "notes": null, "notesType": null, 
"__typename": "ArticleType" }, "webExtras": [ { "id": "1H1ms2timf6", "name": "pcvpr202269460-09880401s1-mm_694600n3147.zip", "size": "1.76 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09880401s1-mm_694600n3147.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "694600n3137", "articleId": "1H0Lj3ttsjK", "__typename": "AdjacentArticleType" }, "next": { "fno": "694600n3157", "articleId": "1H1htPeL7tm", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/3dv/2017/2610/0/261001a506", "title": "Monocular 3D Human Pose Estimation in the Wild Using Improved CNN Supervision", "doi": null, "abstractUrl": "/proceedings-article/3dv/2017/261001a506/12OmNxdDFF9", "parentPublication": { "id": "proceedings/3dv/2017/2610/0", "title": "2017 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200l1480", "title": "Estimating Egocentric 3D Human Pose in Global Space", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200l1480/1BmJUc8RsvS", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798267", "title": "MonoEye: Monocular Fisheye Camera-based 3D Human Pose Estimation", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798267/1cJ0RUiTm8g", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300h727", "title": "xR-EgoPose: Egocentric 3D Human Pose From an HMD Camera", "doi": null, "abstractUrl": 
"/proceedings-article/iccv/2019/480300h727/1hQqpGOfz3i", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2019/5023/0/09022017", "title": "Generalizing Monocular 3D Human Pose Estimation in the Wild", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2019/09022017/1i5mMluVUje", "parentPublication": { "id": "proceedings/iccvw/2019/5023/0", "title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800j887", "title": "You2Me: Inferring Body Pose in Egocentric Video via First and Second Person Interactions", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800j887/1m3oqO2FWx2", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2023/06/09217955", "title": "SelfPose: 3D Egocentric Pose Estimation From a Headset Mounted Camera", "doi": null, "abstractUrl": "/journal/tp/2023/06/09217955/1nL7o5ZTgnS", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2021/0477/0/047700b771", "title": "Automatic Calibration of the Fisheye Camera for Egocentric 3D Human Pose Estimation from a Single Image", "doi": null, "abstractUrl": "/proceedings-article/wacv/2021/047700b771/1uqGwG2xnQQ", "parentPublication": { "id": "proceedings/wacv/2021/0477/0", "title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2021/0191/0/019100d449", "title": "Egocentric Indoor Localization from Room Layouts and Image Outer Corners", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2021/019100d449/1yNhOGCKoko", "parentPublication": { "id": "proceedings/iccvw/2021/0191/0", "title": "2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2021/2688/0/268800a032", "title": "EgoGlass: Egocentric-View Human Pose Estimation From an Eyeglass Frame", "doi": null, "abstractUrl": "/proceedings-article/3dv/2021/268800a032/1zWE6qypWak", "parentPublication": { "id": "proceedings/3dv/2021/2688/0", "title": "2021 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cI6akLvAuQ", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cJ0RUiTm8g", "doi": "10.1109/VR.2019.8798267", "title": "MonoEye: Monocular Fisheye Camera-based 3D Human Pose Estimation", "normalizedTitle": "MonoEye: Monocular Fisheye Camera-based 3D Human Pose Estimation", "abstract": "Wearable cameras have the potential to be used in various ways in combination with egocentric views such as action recognition, gesture input method for augmented/virtual reality (AR/VR) as well as lifelogger. Particularly, the pose of the camera wearer is one of the interesting factors of the egocentric view and various eccentric view-based pose estimation systems have been proposed; however, there is no balance between recognizable poses and enough egocentric views. In this work, we propose MonoEye, a system to provide wearer's estimated 3D pose and wide egocentric view. Our system's chest-mounted camera, equipped with the ultra-wide fisheye lens, covers the wearer's limbs and wide egocentric view; our pose estimation network estimates 3D body pose of the wearer from the camera's egocentric view. The proposed system not only can be used as an input interface of AR and VR through estimation of a various pose of the wearer but also has a potential to be used for action recognition by providing a wide egocentric view.", "abstracts": [ { "abstractType": "Regular", "content": "Wearable cameras have the potential to be used in various ways in combination with egocentric views such as action recognition, gesture input method for augmented/virtual reality (AR/VR) as well as lifelogger. 
Particularly, the pose of the camera wearer is one of the interesting factors of the egocentric view and various eccentric view-based pose estimation systems have been proposed; however, there is no balance between recognizable poses and enough egocentric views. In this work, we propose MonoEye, a system to provide wearer's estimated 3D pose and wide egocentric view. Our system's chest-mounted camera, equipped with the ultra-wide fisheye lens, covers the wearer's limbs and wide egocentric view; our pose estimation network estimates 3D body pose of the wearer from the camera's egocentric view. The proposed system not only can be used as an input interface of AR and VR through estimation of a various pose of the wearer but also has a potential to be used for action recognition by providing a wide egocentric view.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Wearable cameras have the potential to be used in various ways in combination with egocentric views such as action recognition, gesture input method for augmented/virtual reality (AR/VR) as well as lifelogger. Particularly, the pose of the camera wearer is one of the interesting factors of the egocentric view and various eccentric view-based pose estimation systems have been proposed; however, there is no balance between recognizable poses and enough egocentric views. In this work, we propose MonoEye, a system to provide wearer's estimated 3D pose and wide egocentric view. Our system's chest-mounted camera, equipped with the ultra-wide fisheye lens, covers the wearer's limbs and wide egocentric view; our pose estimation network estimates 3D body pose of the wearer from the camera's egocentric view. 
The proposed system not only can be used as an input interface of AR and VR through estimation of a various pose of the wearer but also has a potential to be used for action recognition by providing a wide egocentric view.", "fno": "08798267", "keywords": [ "Cameras", "Gesture Recognition", "Pose Estimation", "Mono Eye", "Wearable Cameras", "Camera Wearer", "Wide Egocentric View", "Ultra Wide Fisheye Lens", "Pose Estimation Network", "Eccentric View Based Pose Estimation Systems", "Augmented Reliability", "Virtual Reality", "Lifelogger", "System Chest Mounted Camera", "Wearer 3 D Pose Estimation", "Camera Egocentric View", "Wearer Limbs", "Input Interface", "Monocular Fisheye Camera Based 3 D Human Pose Estimation", "Three Dimensional Displays", "Cameras", "Prototypes", "Pose Estimation", "Lenses", "Training", "Heating Systems", "Computing Methodologies X 2014 Motion Capture", "Computing Methodologies X 2014 Activity Recognition And Understanding", "Computing Methodologies X 2014 Gestural Input" ], "authors": [ { "affiliation": "Tokyo Institute of Technology", "fullName": "Dong-Hyun Hwang", "givenName": "Dong-Hyun", "surname": "Hwang", "__typename": "ArticleAuthorType" }, { "affiliation": "Tokyo Institute of Technology", "fullName": "Kohei Aso", "givenName": "Kohei", "surname": "Aso", "__typename": "ArticleAuthorType" }, { "affiliation": "Tokyo Institute of Technology", "fullName": "Hideki Koike", "givenName": "Hideki", "surname": "Koike", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-03-01T00:00:00", "pubType": "proceedings", "pages": "988-989", "year": "2019", "issn": null, "isbn": "978-1-7281-1377-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08797804", "articleId": "1cJ0NXcPJGo", "__typename": "AdjacentArticleType" }, "next": { "fno": "08797883", "articleId": 
"1cJ1bz72HBu", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2016/8851/0/8851c620", "title": "First Person Action Recognition Using Deep Learned Descriptors", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2016/8851c620/12OmNvHGrwQ", "parentPublication": { "id": "proceedings/cvpr/2016/8851/0", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2017/1034/0/1034c355", "title": "Using Cross-Model EgoSupervision to Learn Cooperative Basketball Intention", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2017/1034c355/12OmNzwHvbs", "parentPublication": { "id": "proceedings/iccvw/2017/1034/0", "title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2018/3788/0/08545631", "title": "3D Human Pose Estimation from Deep Multi-View 2D Pose", "doi": null, "abstractUrl": "/proceedings-article/icpr/2018/08545631/17D45WwsQ7m", "parentPublication": { "id": "proceedings/icpr/2018/3788/0", "title": "2018 24th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200k0986", "title": "Egocentric Pose Estimation from Human Vision Span", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200k0986/1BmFRI1m6Ck", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200l1480", "title": "Estimating Egocentric 3D Human Pose in Global Space", "doi": null, "abstractUrl": 
"/proceedings-article/iccv/2021/281200l1480/1BmJUc8RsvS", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600n3147", "title": "Estimating Egocentric 3D Human Pose in the Wild with External Weak Supervision", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600n3147/1H1ms5RlwuQ", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956092", "title": "Proprioception-Driven Wearer Pose Estimation for Egocentric Video", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956092/1IHq6jlWPvi", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798060", "title": "Toward human motion capturing with an ultra-wide fisheye camera on the chest", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798060/1cJ12w9YTqE", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800j887", "title": "You2Me: Inferring Body Pose in Egocentric Video via First and Second Person Interactions", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800j887/1m3oqO2FWx2", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2021/0477/0/047700b771", "title": "Automatic Calibration of the Fisheye Camera for Egocentric 3D Human Pose Estimation from a Single Image", "doi": null, "abstractUrl": "/proceedings-article/wacv/2021/047700b771/1uqGwG2xnQQ", "parentPublication": { "id": "proceedings/wacv/2021/0477/0", "title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cI6akLvAuQ", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cJ0Vo2T0ys", "doi": "10.1109/VR.2019.8797894", "title": "Generating Synthetic Humans for Learning 3D Pose Estimation", "normalizedTitle": "Generating Synthetic Humans for Learning 3D Pose Estimation", "abstract": "We generate synthetic annotated data for learning 3D human pose estimation using an egocentric fisheye camera. Synthetic humans are rendered from a virtual fisheye camera, with a random background, random clothing, random lighting parameters. In addition to RGB images, we generate ground truth of 2D/3D poses and location heat-maps. Capturing huge and various images and labeling manually for learning are not required. This approach will be used for the challenging situation such as capturing training data in sports.", "abstracts": [ { "abstractType": "Regular", "content": "We generate synthetic annotated data for learning 3D human pose estimation using an egocentric fisheye camera. Synthetic humans are rendered from a virtual fisheye camera, with a random background, random clothing, random lighting parameters. In addition to RGB images, we generate ground truth of 2D/3D poses and location heat-maps. Capturing huge and various images and labeling manually for learning are not required. This approach will be used for the challenging situation such as capturing training data in sports.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We generate synthetic annotated data for learning 3D human pose estimation using an egocentric fisheye camera. Synthetic humans are rendered from a virtual fisheye camera, with a random background, random clothing, random lighting parameters. In addition to RGB images, we generate ground truth of 2D/3D poses and location heat-maps. 
Capturing huge and various images and labeling manually for learning are not required. This approach will be used for the challenging situation such as capturing training data in sports.", "fno": "08797894", "keywords": [ "Cameras", "Clothing", "Image Capture", "Image Colour Analysis", "Learning Artificial Intelligence", "Pose Estimation", "Rendering Computer Graphics", "Solid Modelling", "Virtual Reality", "Synthetic Humans", "Synthetic Annotated Data", "Egocentric Fisheye Camera", "Virtual Fisheye Camera", "Random Background", "Random Clothing", "Random Lighting Parameters", "RGB Images", "Location Heat Maps", "Learning 3 D Human Pose Estimation", "2 D Poses", "Cameras", "Three Dimensional Displays", "Solid Modeling", "Lighting", "Pose Estimation", "Clothing", "Shape", "Computing Methodologies", "Motion Capture", "Activity Recognition And Understanding", "Gestural Input" ], "authors": [ { "affiliation": "Tokyo Institute of Technology", "fullName": "Kohei Aso", "givenName": "Kohei", "surname": "Aso", "__typename": "ArticleAuthorType" }, { "affiliation": "Tokyo Institute of Technology", "fullName": "Dong-Hyun Hwang", "givenName": "Dong-Hyun", "surname": "Hwang", "__typename": "ArticleAuthorType" }, { "affiliation": "Tokyo Institute of Technology", "fullName": "Hideki Koike", "givenName": "Hideki", "surname": "Koike", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-03-01T00:00:00", "pubType": "proceedings", "pages": "1519-1520", "year": "2019", "issn": null, "isbn": "978-1-7281-1377-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08797904", "articleId": "1cJ0JpXbP32", "__typename": "AdjacentArticleType" }, "next": { "fno": "08797802", "articleId": "1cJ0YFQ1Bug", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { 
"id": "proceedings/3dv/2016/5407/0/5407a685", "title": "Learning Camera Viewpoint Using CNN to Improve 3D Body Pose Estimation", "doi": null, "abstractUrl": "/proceedings-article/3dv/2016/5407a685/12OmNqFa5pt", "parentPublication": { "id": "proceedings/3dv/2016/5407/0", "title": "2016 Fourth International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nicoint/2016/2305/0/2305a154", "title": "Compositing Real and Synthetic Images: Using Kinect and Fisheye Camera", "doi": null, "abstractUrl": "/proceedings-article/nicoint/2016/2305a154/12OmNzICEP2", "parentPublication": { "id": "proceedings/nicoint/2016/2305/0", "title": "2016 Nicograph International (NicoInt)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icisce/2018/5500/0/550000a370", "title": "Parameterized Synthetic Image Data Set for Fisheye Lens", "doi": null, "abstractUrl": "/proceedings-article/icisce/2018/550000a370/17D45WaTkc9", "parentPublication": { "id": "proceedings/icisce/2018/5500/0", "title": "2018 5th International Conference on Information Science and Control Engineering (ICISCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000e663", "title": "Feature Mapping for Learning Fast and Accurate 3D Pose Inference from Synthetic Images", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000e663/17D45WaTke4", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200l1480", "title": "Estimating Egocentric 3D Human Pose in Global Space", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200l1480/1BmJUc8RsvS", "parentPublication": { "id": 
"proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600n3147", "title": "Estimating Egocentric 3D Human Pose in the Wild with External Weak Supervision", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600n3147/1H1ms5RlwuQ", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2019/2506/0/250600a475", "title": "SIDOD: A Synthetic Image Dataset for 3D Object Pose Recognition With Distractors", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2019/250600a475/1iTvum5DfXy", "parentPublication": { "id": "proceedings/cvprw/2019/2506/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093563", "title": "Learning from THEODORE: A Synthetic Omnidirectional Top-View Indoor Dataset for Deep Transfer Learning", "doi": null, "abstractUrl": "/proceedings-article/wacv/2020/09093563/1jPbCpUwBNK", "parentPublication": { "id": "proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2020/9274/0/927400a332", "title": "A Study on the Impact of Domain Randomization for Monocular Deep 6DoF Pose Estimation", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2020/927400a332/1p2VASaMsI8", "parentPublication": { "id": "proceedings/sibgrapi/2020/9274/0", "title": "2020 33rd SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2021/0477/0/047700b771", "title": "Automatic Calibration of the Fisheye Camera for Egocentric 3D Human Pose Estimation from a Single Image", "doi": null, "abstractUrl": "/proceedings-article/wacv/2021/047700b771/1uqGwG2xnQQ", "parentPublication": { "id": "proceedings/wacv/2021/0477/0", "title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cI6akLvAuQ", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cJ12w9YTqE", "doi": "10.1109/VR.2019.8798060", "title": "Toward human motion capturing with an ultra-wide fisheye camera on the chest", "normalizedTitle": "Toward human motion capturing with an ultra-wide fisheye camera on the chest", "abstract": "We are interested in utilizing egocentric view from a wearable camera and are working on MonoEye system, a novel system to estimate the wearer's motion using a chest-mounted camera equipped with an ultra-wide fisheye lens. Because our system has a wide field of view, it provides a balanced capacity of recognizable pose types and broad egocentric view. The prototype deep neural network estimates camera wearer's 3D pose and acquires motion without complex configuration like conventional motion capture systems.", "abstracts": [ { "abstractType": "Regular", "content": "We are interested in utilizing egocentric view from a wearable camera and are working on MonoEye system, a novel system to estimate the wearer's motion using a chest-mounted camera equipped with an ultra-wide fisheye lens. Because our system has a wide field of view, it provides a balanced capacity of recognizable pose types and broad egocentric view. The prototype deep neural network estimates camera wearer's 3D pose and acquires motion without complex configuration like conventional motion capture systems.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We are interested in utilizing egocentric view from a wearable camera and are working on MonoEye system, a novel system to estimate the wearer's motion using a chest-mounted camera equipped with an ultra-wide fisheye lens. 
Because our system has a wide field of view, it provides a balanced capacity of recognizable pose types and broad egocentric view. The prototype deep neural network estimates camera wearer's 3D pose and acquires motion without complex configuration like conventional motion capture systems.", "fno": "08798060", "keywords": [ "Cameras", "Image Motion Analysis", "Lenses", "Neural Nets", "Pose Estimation", "Ultra Wide Fisheye Lens", "Balanced Capacity", "Broad Egocentric View", "Prototype Deep Neural Network", "Human Motion Capturing", "Ultra Wide Fisheye Camera", "Wearable Camera", "Mono Eye System", "Chest Mounted Camera", "Camera Wearer 3 D Pose", "Wearer Motion Acquisition", "Cameras", "Three Dimensional Displays", "Prototypes", "Videos", "Lenses", "Neural Networks", "Heating Systems", "Computing Methodologies", "Motion Capture", "Activity Recognition And Understanding", "Gestural Input" ], "authors": [ { "affiliation": "Tokyo Institute of Technology", "fullName": "Dong-Hyun Hwang", "givenName": "Dong-Hyun", "surname": "Hwang", "__typename": "ArticleAuthorType" }, { "affiliation": "Tokyo Institute of Technology", "fullName": "Kohei Aso", "givenName": "Kohei", "surname": "Aso", "__typename": "ArticleAuthorType" }, { "affiliation": "Tokyo Institute of Technology", "fullName": "Hideki Koike", "givenName": "Hideki", "surname": "Koike", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-03-01T00:00:00", "pubType": "proceedings", "pages": "1524-1526", "year": "2019", "issn": null, "isbn": "978-1-7281-1377-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08797802", "articleId": "1cJ0YFQ1Bug", "__typename": "AdjacentArticleType" }, "next": { "fno": "08798196", "articleId": "1cJ1cRKxDzO", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, 
"recommendedArticles": [ { "id": "proceedings/cvpr/2015/6964/0/07298625", "title": "Delving into egocentric actions", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2015/07298625/12OmNrNh0ru", "parentPublication": { "id": "proceedings/cvpr/2015/6964/0", "title": "2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2014/5118/0/5118c537", "title": "Temporal Segmentation of Egocentric Videos", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2014/5118c537/12OmNs59JN1", "parentPublication": { "id": "proceedings/cvpr/2014/5118/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2016/8851/0/8851c620", "title": "First Person Action Recognition Using Deep Learned Descriptors", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2016/8851c620/12OmNvHGrwQ", "parentPublication": { "id": "proceedings/cvpr/2016/8851/0", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2017/4822/0/07926640", "title": "Computing Egomotion with Local Loop Closures for Egocentric Videos", "doi": null, "abstractUrl": "/proceedings-article/wacv/2017/07926640/12OmNviHKiX", "parentPublication": { "id": "proceedings/wacv/2017/4822/0", "title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2013/2840/0/2840d216", "title": "Learning to Predict Gaze in Egocentric Video", "doi": null, "abstractUrl": "/proceedings-article/iccv/2013/2840d216/12OmNx5GU3r", "parentPublication": { "id": "proceedings/iccv/2013/2840/0", "title": "2013 IEEE International Conference on 
Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2016/0641/0/07477708", "title": "Compact CNN for indexing egocentric videos", "doi": null, "abstractUrl": "/proceedings-article/wacv/2016/07477708/12OmNzE54Gp", "parentPublication": { "id": "proceedings/wacv/2016/0641/0", "title": "2016 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2017/1034/0/1034c355", "title": "Using Cross-Model EgoSupervision to Learn Cooperative Basketball Intention", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2017/1034c355/12OmNzwHvbs", "parentPublication": { "id": "proceedings/iccvw/2017/1034/0", "title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798267", "title": "MonoEye: Monocular Fisheye Camera-based 3D Human Pose Estimation", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798267/1cJ0RUiTm8g", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800j887", "title": "You2Me: Inferring Body Pose in Egocentric Video via First and Second Person Interactions", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800j887/1m3oqO2FWx2", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/09/09394804", "title": "Multiple Human Association and Tracking From Egocentric and Complementary Top Views", "doi": null, 
"abstractUrl": "/journal/tp/2022/09/09394804/1strgORKZsA", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1hQqfuoOyHu", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "acronym": "iccv", "groupId": "1000149", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1hQqpGOfz3i", "doi": "10.1109/ICCV.2019.00782", "title": "xR-EgoPose: Egocentric 3D Human Pose From an HMD Camera", "normalizedTitle": "xR-EgoPose: Egocentric 3D Human Pose From an HMD Camera", "abstract": "We present a new solution to egocentric 3D body pose estimation from monocular images captured from a downward looking fish-eye camera installed on the rim of a head mounted virtual reality device. This unusual viewpoint, just 2 cm away from the user's face, leads to images with unique visual appearance, characterized by severe self-occlusions and strong perspective distortions that result in a drastic difference in resolution between lower and upper body. Our contribution is two-fold. Firstly, we propose a new encoder-decoder architecture with a novel dual branch decoder designed specifically to account for the varying uncertainty in the 2D joint locations. Our quantitative evaluation, both on synthetic and real-world datasets, shows that our strategy leads to substantial improvements in accuracy over state of the art egocentric pose estimation approaches. Our second contribution is a new large-scale photorealistic synthetic dataset - xR-EgoPose - offering 383K frames of high quality renderings ofpeople with a diversity of skin tones, body shapes, clothing, in a variety of backgrounds and lighting conditions, performing a range of actions. Our experiments show that the high variability in our new synthetic training corpus leads to good generalization to real world footage and to state of the art results on real world datasets with ground truth. 
Moreover, an evaluation on the Human3.6M benchmark shows that the performance of our method is on par with top performing approaches on the more classic problem of 3D human pose from a third person viewpoint.", "abstracts": [ { "abstractType": "Regular", "content": "We present a new solution to egocentric 3D body pose estimation from monocular images captured from a downward looking fish-eye camera installed on the rim of a head mounted virtual reality device. This unusual viewpoint, just 2 cm away from the user's face, leads to images with unique visual appearance, characterized by severe self-occlusions and strong perspective distortions that result in a drastic difference in resolution between lower and upper body. Our contribution is two-fold. Firstly, we propose a new encoder-decoder architecture with a novel dual branch decoder designed specifically to account for the varying uncertainty in the 2D joint locations. Our quantitative evaluation, both on synthetic and real-world datasets, shows that our strategy leads to substantial improvements in accuracy over state of the art egocentric pose estimation approaches. Our second contribution is a new large-scale photorealistic synthetic dataset - xR-EgoPose - offering 383K frames of high quality renderings ofpeople with a diversity of skin tones, body shapes, clothing, in a variety of backgrounds and lighting conditions, performing a range of actions. Our experiments show that the high variability in our new synthetic training corpus leads to good generalization to real world footage and to state of the art results on real world datasets with ground truth. 
Moreover, an evaluation on the Human3.6M benchmark shows that the performance of our method is on par with top performing approaches on the more classic problem of 3D human pose from a third person viewpoint.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a new solution to egocentric 3D body pose estimation from monocular images captured from a downward looking fish-eye camera installed on the rim of a head mounted virtual reality device. This unusual viewpoint, just 2 cm away from the user's face, leads to images with unique visual appearance, characterized by severe self-occlusions and strong perspective distortions that result in a drastic difference in resolution between lower and upper body. Our contribution is two-fold. Firstly, we propose a new encoder-decoder architecture with a novel dual branch decoder designed specifically to account for the varying uncertainty in the 2D joint locations. Our quantitative evaluation, both on synthetic and real-world datasets, shows that our strategy leads to substantial improvements in accuracy over state of the art egocentric pose estimation approaches. Our second contribution is a new large-scale photorealistic synthetic dataset - xR-EgoPose - offering 383K frames of high quality renderings ofpeople with a diversity of skin tones, body shapes, clothing, in a variety of backgrounds and lighting conditions, performing a range of actions. Our experiments show that the high variability in our new synthetic training corpus leads to good generalization to real world footage and to state of the art results on real world datasets with ground truth. 
Moreover, an evaluation on the Human3.6M benchmark shows that the performance of our method is on par with top performing approaches on the more classic problem of 3D human pose from a third person viewpoint.", "fno": "480300h727", "keywords": [ "Cameras", "Helmet Mounted Displays", "Image Capture", "Image Motion Analysis", "Image Sensors", "Pose Estimation", "Realistic Images", "Rendering Computer Graphics", "Solid Modelling", "Video Signal Processing", "Virtual Reality", "X R Ego Pose", "HMD Camera", "Monocular Images", "Fish Eye Camera", "Unique Visual Appearance", "Encoder Decoder Architecture", "2 D Joint Locations", "Large Scale Photorealistic Synthetic Dataset", "High Quality Renderings", "Head Mounted Virtual Reality Device", "Dual Branch Decoder Design", "Egocentric 3 D Human Body Pose Estimation", "Three Dimensional Displays", "Cameras", "Pose Estimation", "Two Dimensional Displays", "Training", "Resists", "Uncertainty" ], "authors": [ { "affiliation": "UCL", "fullName": "Denis Tome", "givenName": "Denis", "surname": "Tome", "__typename": "ArticleAuthorType" }, { "affiliation": "Facebook", "fullName": "Patrick Peluse", "givenName": "Patrick", "surname": "Peluse", "__typename": "ArticleAuthorType" }, { "affiliation": "University College London", "fullName": "Lourdes Agapito", "givenName": "Lourdes", "surname": "Agapito", "__typename": "ArticleAuthorType" }, { "affiliation": "Facebook", "fullName": "Hernan Badino", "givenName": "Hernan", "surname": "Badino", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-10-01T00:00:00", "pubType": "proceedings", "pages": "7727-7737", "year": "2019", "issn": null, "isbn": "978-1-7281-4803-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "480300h717", "articleId": "1hVlLjR0Chq", "__typename": "AdjacentArticleType" }, "next": { 
"fno": "480300h738", "articleId": "1hQqtWYyufS", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/3dv/2016/5407/0/5407a685", "title": "Learning Camera Viewpoint Using CNN to Improve 3D Body Pose Estimation", "doi": null, "abstractUrl": "/proceedings-article/3dv/2016/5407a685/12OmNqFa5pt", "parentPublication": { "id": "proceedings/3dv/2016/5407/0", "title": "2016 Fourth International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457d501", "title": "Seeing Invisible Poses: Estimating 3D Body Pose from Egocentric Video", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457d501/12OmNyk300m", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200l1480", "title": "Estimating Egocentric 3D Human Pose in Global Space", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200l1480/1BmJUc8RsvS", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600n3147", "title": "Estimating Egocentric 3D Human Pose in the Wild with External Weak Supervision", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600n3147/1H1ms5RlwuQ", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956092", "title": "Proprioception-Driven Wearer Pose Estimation for 
Egocentric Video", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956092/1IHq6jlWPvi", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798267", "title": "MonoEye: Monocular Fisheye Camera-based 3D Human Pose Estimation", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798267/1cJ0RUiTm8g", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800j887", "title": "You2Me: Inferring Body Pose in Egocentric Video via First and Second Person Interactions", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800j887/1m3oqO2FWx2", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2023/06/09217955", "title": "SelfPose: 3D Egocentric Pose Estimation From a Headset Mounted Camera", "doi": null, "abstractUrl": "/journal/tp/2023/06/09217955/1nL7o5ZTgnS", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2021/0477/0/047700b771", "title": "Automatic Calibration of the Fisheye Camera for Egocentric 3D Human Pose Estimation from a Single Image", "doi": null, "abstractUrl": "/proceedings-article/wacv/2021/047700b771/1uqGwG2xnQQ", "parentPublication": { "id": "proceedings/wacv/2021/0477/0", "title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2021/2688/0/268800a032", "title": "EgoGlass: Egocentric-View Human Pose Estimation From an Eyeglass Frame", "doi": null, "abstractUrl": "/proceedings-article/3dv/2021/268800a032/1zWE6qypWak", "parentPublication": { "id": "proceedings/3dv/2021/2688/0", "title": "2021 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1m3n9N02qgE", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1m3nfro8U8g", "doi": "10.1109/CVPR42600.2020.00714", "title": "HandVoxNet: Deep Voxel-Based Network for 3D Hand Shape and Pose Estimation From a Single Depth Map", "normalizedTitle": "HandVoxNet: Deep Voxel-Based Network for 3D Hand Shape and Pose Estimation From a Single Depth Map", "abstract": "3D hand shape and pose estimation from a single depth map is a new and challenging computer vision problem with many applications. The state-of-the-art methods directly regress 3D hand meshes from 2D depth images via 2D convolutional neural networks, which leads to artefacts in the estimations due to perspective distortions in the images. In contrast, we propose a novel architecture with 3D convolutions trained in a weakly-supervised manner. The input to our method is a 3D voxelized depth map, and we rely on two hand shape representations. The first one is the 3D voxelized grid of the shape which is accurate but does not preserve the mesh topology and the number of mesh vertices. The second representation is the 3D hand surface which is less accurate but does not suffer from the limitations of the first representation. We combine the advantages of these two representations by registering the hand surface to the voxelized hand shape. In the extensive experiments, the proposed approach improves over the state of the art by47.8% on the SynHand5M dataset. Moreover, our augmentation policy for voxelized depth maps further enhances the accuracy of 3D hand pose estimation on real data. 
Our method produces visually more reasonable and realistic hand shapes on NYU and BigHand2.2M datasets compared to the existing approaches.", "abstracts": [ { "abstractType": "Regular", "content": "3D hand shape and pose estimation from a single depth map is a new and challenging computer vision problem with many applications. The state-of-the-art methods directly regress 3D hand meshes from 2D depth images via 2D convolutional neural networks, which leads to artefacts in the estimations due to perspective distortions in the images. In contrast, we propose a novel architecture with 3D convolutions trained in a weakly-supervised manner. The input to our method is a 3D voxelized depth map, and we rely on two hand shape representations. The first one is the 3D voxelized grid of the shape which is accurate but does not preserve the mesh topology and the number of mesh vertices. The second representation is the 3D hand surface which is less accurate but does not suffer from the limitations of the first representation. We combine the advantages of these two representations by registering the hand surface to the voxelized hand shape. In the extensive experiments, the proposed approach improves over the state of the art by47.8% on the SynHand5M dataset. Moreover, our augmentation policy for voxelized depth maps further enhances the accuracy of 3D hand pose estimation on real data. Our method produces visually more reasonable and realistic hand shapes on NYU and BigHand2.2M datasets compared to the existing approaches.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "3D hand shape and pose estimation from a single depth map is a new and challenging computer vision problem with many applications. The state-of-the-art methods directly regress 3D hand meshes from 2D depth images via 2D convolutional neural networks, which leads to artefacts in the estimations due to perspective distortions in the images. 
In contrast, we propose a novel architecture with 3D convolutions trained in a weakly-supervised manner. The input to our method is a 3D voxelized depth map, and we rely on two hand shape representations. The first one is the 3D voxelized grid of the shape which is accurate but does not preserve the mesh topology and the number of mesh vertices. The second representation is the 3D hand surface which is less accurate but does not suffer from the limitations of the first representation. We combine the advantages of these two representations by registering the hand surface to the voxelized hand shape. In the extensive experiments, the proposed approach improves over the state of the art by47.8% on the SynHand5M dataset. Moreover, our augmentation policy for voxelized depth maps further enhances the accuracy of 3D hand pose estimation on real data. Our method produces visually more reasonable and realistic hand shapes on NYU and BigHand2.2M datasets compared to the existing approaches.", "fno": "716800h111", "keywords": [ "Computer Vision", "Convolutional Neural Nets", "Feature Extraction", "Image Representation", "Learning Artificial Intelligence", "Mesh Generation", "Neural Net Architecture", "Pose Estimation", "Regression Analysis", "NYU Datasets", "Big Hand 2 2 M Datasets", "Syn Hand 5 M Dataset", "3 D Hand Surface", "Mesh Vertices", "Mesh Topology", "Hand Vox Net", "New Computer Vision Problem", "Single Depth Map", "3 D Hand Pose Estimation", "3 D Hand Shape", "Deep Voxel Based Network", "Realistic Hand Shapes", "Voxelized Depth Maps", "Voxelized Hand Shape", "3 D Voxelized Grid", "Hand Shape Representations", "3 D Voxelized Depth Map", "2 D Convolutional Neural Networks", "2 D Depth Images", "3 D Hand Meshes", "Shape", "Three Dimensional Displays", "Pose Estimation", "Two Dimensional Displays", "Heating Systems", "Distortion" ], "authors": [ { "affiliation": "TU Kaiserslautern; DFKI Kaiserslautern; NUST Pakistan", "fullName": "Jameel Malik", "givenName": 
"Jameel", "surname": "Malik", "__typename": "ArticleAuthorType" }, { "affiliation": "TU Kaiserslautern; DFKI Kaiserslautern", "fullName": "Ibrahim Abdelaziz", "givenName": "Ibrahim", "surname": "Abdelaziz", "__typename": "ArticleAuthorType" }, { "affiliation": "DFKI Kaiserslautern; UPM Saudi Arabia", "fullName": "Ahmed Elhayek", "givenName": "Ahmed", "surname": "Elhayek", "__typename": "ArticleAuthorType" }, { "affiliation": "MPII Saarland", "fullName": "Soshi Shimada", "givenName": "Soshi", "surname": "Shimada", "__typename": "ArticleAuthorType" }, { "affiliation": "TU Kaiserslautern; DFKI Kaiserslautern", "fullName": "Sk Aziz Ali", "givenName": "Sk Aziz", "surname": "Ali", "__typename": "ArticleAuthorType" }, { "affiliation": "MPII Saarland", "fullName": "Vladislav Golyanik", "givenName": "Vladislav", "surname": "Golyanik", "__typename": "ArticleAuthorType" }, { "affiliation": "MPII Saarland", "fullName": "Christian Theobalt", "givenName": "Christian", "surname": "Theobalt", "__typename": "ArticleAuthorType" }, { "affiliation": "TU Kaiserslautern; DFKI Kaiserslautern", "fullName": "Didier Stricker", "givenName": "Didier", "surname": "Stricker", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-06-01T00:00:00", "pubType": "proceedings", "pages": "7111-7120", "year": "2020", "issn": null, "isbn": "978-1-7281-7168-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "716800h101", "articleId": "1m3obU0FzVK", "__typename": "AdjacentArticleType" }, "next": { "fno": "716800h121", "articleId": "1m3ohqm2Qg0", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2016/8851/0/8851d593", "title": "Robust 3D Hand Pose Estimation in Single Depth Images: From Single-View CNN to Multi-View CNNs", "doi": null, 
"abstractUrl": "/proceedings-article/cvpr/2016/8851d593/12OmNASraXC", "parentPublication": { "id": "proceedings/cvpr/2016/8851/0", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457f679", "title": "3D Convolutional Neural Networks for Efficient and Robust Hand Pose Estimation from Single Depth Images", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457f679/12OmNBQC895", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iscv/2017/4062/0/08054904", "title": "Hand pose estimation based on deep learning depth map for hand gesture recognition", "doi": null, "abstractUrl": "/proceedings-article/iscv/2017/08054904/12OmNwFid2e", "parentPublication": { "id": "proceedings/iscv/2017/4062/0", "title": "2017 Intelligent Systems and Computer Vision (ISCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032e913", "title": "Learning to Estimate 3D Hand Pose from Single RGB Images", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032e913/12OmNwcl7Bw", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2019/04/08338122", "title": "Real-Time 3D Hand Pose Estimation with 3D Convolutional Neural Networks", "doi": null, "abstractUrl": "/journal/tp/2019/04/08338122/13rRUx0xPJX", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { 
"id": "proceedings/cvpr/2018/6420/0/642000c636", "title": "Depth-Based 3D Hand Pose Estimation: From Current Achievements to Future Goals", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000c636/17D45W2Wyyl", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000f079", "title": "V2V-PoseNet: Voxel-to-Voxel Prediction Network for Accurate 3D Hand and Human Pose Estimation from a Single Depth Map", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000f079/17D45WHONoj", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000f147", "title": "Dense 3D Regression for Hand Pose Estimation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000f147/17D45WaTkeL", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2019/5023/0/502300c866", "title": "3D Hand Pose Estimation from RGB Using Privileged Learning with Depth Data", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2019/502300c866/1i5mvFudr68", "parentPublication": { "id": "proceedings/iccvw/2019/5023/0", "title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/12/09599544", "title": "HandVoxNet++: 3D Hand Shape and Pose Estimation Using Voxel-Based Neural Networks", "doi": null, "abstractUrl": 
"/journal/tp/2022/12/09599544/1yeC9mCPAty", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1uqGdWlamUo", "title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)", "acronym": "wacv", "groupId": "1000040", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1uqGwG2xnQQ", "doi": "10.1109/WACV48630.2021.00181", "title": "Automatic Calibration of the Fisheye Camera for Egocentric 3D Human Pose Estimation from a Single Image", "normalizedTitle": "Automatic Calibration of the Fisheye Camera for Egocentric 3D Human Pose Estimation from a Single Image", "abstract": "We propose a method for egocentric 3D human pose estimation from a single image captured by a fisheye camera. The problem of estimating the egocentric 3D pose for a fisheye camera is that images may be subject to strong image distortions (e.g. 2D poses on the image plane that pass through the line of sight of the fisheye lens).Therefore, in this paper, we approach this problem by an automatic calibration module. Given a single image, our network first estimates 3D joint locations of a human in camera coordinates. To alleviate the impact of image distortions on 3D human pose estimation, we then use the automatic calibration to further regularize the 3D predictions. Experimental results demonstrate that the proposed method achieves state-of-the-art performance.", "abstracts": [ { "abstractType": "Regular", "content": "We propose a method for egocentric 3D human pose estimation from a single image captured by a fisheye camera. The problem of estimating the egocentric 3D pose for a fisheye camera is that images may be subject to strong image distortions (e.g. 2D poses on the image plane that pass through the line of sight of the fisheye lens).Therefore, in this paper, we approach this problem by an automatic calibration module. Given a single image, our network first estimates 3D joint locations of a human in camera coordinates. 
To alleviate the impact of image distortions on 3D human pose estimation, we then use the automatic calibration to further regularize the 3D predictions. Experimental results demonstrate that the proposed method achieves state-of-the-art performance.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose a method for egocentric 3D human pose estimation from a single image captured by a fisheye camera. The problem of estimating the egocentric 3D pose for a fisheye camera is that images may be subject to strong image distortions (e.g. 2D poses on the image plane that pass through the line of sight of the fisheye lens).Therefore, in this paper, we approach this problem by an automatic calibration module. Given a single image, our network first estimates 3D joint locations of a human in camera coordinates. To alleviate the impact of image distortions on 3D human pose estimation, we then use the automatic calibration to further regularize the 3D predictions. Experimental results demonstrate that the proposed method achieves state-of-the-art performance.", "fno": "047700b771", "keywords": [ "Calibration", "Cameras", "Image Sensors", "Pose Estimation", "Fisheye Camera", "Egocentric 3 D Human Pose Estimation", "Single Image", "Strong Image Distortions", "2 D Poses", "Image Plane", "Fisheye Lens", "Automatic Calibration Module", "3 D Joint Locations", "Camera Coordinates", "Computer Vision", "Three Dimensional Displays", "Conferences", "Pose Estimation", "Cameras", "Distortion", "Calibration" ], "authors": [ { "affiliation": "University of Amsterdam", "fullName": "Yahui Zhang", "givenName": "Yahui", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Amsterdam", "fullName": "Shaodi You", "givenName": "Shaodi", "surname": "You", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Amsterdam", "fullName": "Theo Gevers", "givenName": "Theo", "surname": "Gevers", "__typename": "ArticleAuthorType" } ], 
"idPrefix": "wacv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-01-01T00:00:00", "pubType": "proceedings", "pages": "1771-1780", "year": "2021", "issn": null, "isbn": "978-1-6654-0477-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "047700b760", "articleId": "1uqGnyhqL6g", "__typename": "AdjacentArticleType" }, "next": { "fno": "047700b781", "articleId": "1uqGii8BU6k", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/csie/2009/3507/3/3507c081", "title": "An Easy Camera Pose Method from Fisheye Image", "doi": null, "abstractUrl": "/proceedings-article/csie/2009/3507c081/12OmNwdtwfC", "parentPublication": { "id": "proceedings/csie/2009/3507/3", "title": "Computer Science and Information Engineering, World Congress on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08643070", "title": "<italic>Mo<sup>2</sup>Cap<sup>2</sup></italic>: Real-time Mobile 3D <italic>Mo</italic>tion <italic>Capture</italic> with a <italic>Cap</italic>-mounted Fisheye Camera", "doi": null, "abstractUrl": "/journal/tg/2019/05/08643070/17PYEjrlgBQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200l1480", "title": "Estimating Egocentric 3D Human Pose in Global Space", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200l1480/1BmJUc8RsvS", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200o4508", "title": "EgoRenderer: 
Rendering Human Avatars from Egocentric Camera Images", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200o4508/1BmJxzOtk4w", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600n3147", "title": "Estimating Egocentric 3D Human Pose in the Wild with External Weak Supervision", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600n3147/1H1ms5RlwuQ", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798267", "title": "MonoEye: Monocular Fisheye Camera-based 3D Human Pose Estimation", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798267/1cJ0RUiTm8g", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797894", "title": "Generating Synthetic Humans for Learning 3D Pose Estimation", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797894/1cJ0Vo2T0ys", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798060", "title": "Toward human motion capturing with an ultra-wide fisheye camera on the chest", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798060/1cJ12w9YTqE", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User 
Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2023/06/09217955", "title": "SelfPose: 3D Egocentric Pose Estimation From a Headset Mounted Camera", "doi": null, "abstractUrl": "/journal/tp/2023/06/09217955/1nL7o5ZTgnS", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2021/0191/0/019100b174", "title": "Deep Single Fisheye Image Camera Calibration for Over 180-degree Projection of Field of View", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2021/019100b174/1yNinwg4Lvy", "parentPublication": { "id": "proceedings/iccvw/2021/0191/0", "title": "2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1aIRXQWdiUM", "title": "2019 International Conference on Information Networking (ICOIN)", "acronym": "icoin", "groupId": "1000363", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1aIS0jzivhC", "doi": "10.1109/ICOIN.2019.8718151", "title": "Cooperative Server-Client HTTP Adaptive Streaming System for Live Video Streaming", "normalizedTitle": "Cooperative Server-Client HTTP Adaptive Streaming System for Live Video Streaming", "abstract": "In this work, we propose a cooperative server-client HTTP adaptive streaming system to provide a high-quality live video streaming service. In the proposed system, the server adaptively encodes the video segment to improve the bandwidth utilization of clients according to the bandwidth information collected from clients. Additionally, the client selects the segment bitrate by considering the bandwidth utilization, quality difference, and buffered playback time. The performance of the proposed system is verified through a simulation. The simulation results show that the proposed system can provide higher bandwidth utilization and lower quality difference than existing HTTP adaptive streaming systems.", "abstracts": [ { "abstractType": "Regular", "content": "In this work, we propose a cooperative server-client HTTP adaptive streaming system to provide a high-quality live video streaming service. In the proposed system, the server adaptively encodes the video segment to improve the bandwidth utilization of clients according to the bandwidth information collected from clients. Additionally, the client selects the segment bitrate by considering the bandwidth utilization, quality difference, and buffered playback time. The performance of the proposed system is verified through a simulation. 
The simulation results show that the proposed system can provide higher bandwidth utilization and lower quality difference than existing HTTP adaptive streaming systems.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this work, we propose a cooperative server-client HTTP adaptive streaming system to provide a high-quality live video streaming service. In the proposed system, the server adaptively encodes the video segment to improve the bandwidth utilization of clients according to the bandwidth information collected from clients. Additionally, the client selects the segment bitrate by considering the bandwidth utilization, quality difference, and buffered playback time. The performance of the proposed system is verified through a simulation. The simulation results show that the proposed system can provide higher bandwidth utilization and lower quality difference than existing HTTP adaptive streaming systems.", "fno": "08718151", "keywords": [ "Cooperative Communication", "Hypermedia", "Transport Protocols", "Video Coding", "Video Streaming", "High Quality Live Video Streaming Service", "Bandwidth Information", "Bandwidth Utilization", "Cooperative Server Client HTTP Adaptive Streaming System", "Video Segment Encoding", "Segment Bitrate", "Quality Difference", "Buffered Playback Time", "Bandwidth", "Bit Rate", "Servers", "Encoding", "Streaming Media", "Video Recording", "Quality Assessment", "HTTP Adaptive Streaming", "Live Video Streaming", "MPEG DASH", "Bandwidth Utilization", "Encoding Bitrate Decision", "K Means Clustering", "Segment Adaptation" ], "authors": [ { "affiliation": "Department of Computer Science and Engineering POSTECH Pohang, Republic of Korea", "fullName": "Sangwook Han", "givenName": "Sangwook", "surname": "Han", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Computer Science and Engineering POSTECH Pohang, Republic of Korea", "fullName": "Yunmin Go", "givenName": "Yunmin", "surname": "Go", 
"__typename": "ArticleAuthorType" }, { "affiliation": "Department of Computer Science and Engineering POSTECH Pohang, Republic of Korea", "fullName": "Hyunmin Noh", "givenName": "Hyunmin", "surname": "Noh", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Computer Science and Engineering POSTECH Pohang, Republic of Korea", "fullName": "Hwangjun Song", "givenName": "Hwangjun", "surname": "Song", "__typename": "ArticleAuthorType" } ], "idPrefix": "icoin", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-01-01T00:00:00", "pubType": "proceedings", "pages": "176-180", "year": "2019", "issn": "1976-7684", "isbn": "978-1-5386-8350-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08718114", "articleId": "1aIS0GtYRiw", "__typename": "AdjacentArticleType" }, "next": { "fno": "08718109", "articleId": "1aIS2keBNao", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icoin/2016/1724/0/07427112", "title": "Client-side rate adaptation scheme for HTTP adaptive streaming based on playout buffer model", "doi": null, "abstractUrl": "/proceedings-article/icoin/2016/07427112/12OmNBSBkbo", "parentPublication": { "id": "proceedings/icoin/2016/1724/0", "title": "2016 International Conference on Information Networking (ICOIN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdcs/2017/1792/0/1792a298", "title": "FLARE: Coordinated Rate Adaptation for HTTP Adaptive Streaming in Cellular Networks", "doi": null, "abstractUrl": "/proceedings-article/icdcs/2017/1792a298/12OmNCd2rN1", "parentPublication": { "id": "proceedings/icdcs/2017/1792/0", "title": "2017 IEEE 37th International Conference on Distributed Computing Systems (ICDCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, 
{ "id": "proceedings/icoin/2016/1724/0/07427115", "title": "Dynamic segment duration control for live streaming over HTTP", "doi": null, "abstractUrl": "/proceedings-article/icoin/2016/07427115/12OmNvD8RyK", "parentPublication": { "id": "proceedings/icoin/2016/1724/0", "title": "2016 International Conference on Information Networking (ICOIN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cnsm/2014/67/0/07014197", "title": "HTTP rate adaptive algorithm with high bandwidth utilization", "doi": null, "abstractUrl": "/proceedings-article/cnsm/2014/07014197/12OmNyRPgIj", "parentPublication": { "id": "proceedings/cnsm/2014/67/0", "title": "2014 10th International Conference on Network and Service Management (CNSM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bwcca/2015/8315/0/8315a082", "title": "Modeling for Short-Form HTTP Adaptive Streaming Considering Memory Effect", "doi": null, "abstractUrl": "/proceedings-article/bwcca/2015/8315a082/12OmNz6iOvM", "parentPublication": { "id": "proceedings/bwcca/2015/8315/0", "title": "2015 10th International Conference on Broadband and Wireless Computing, Communication and Applications (BWCCA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2014/4761/0/06890309", "title": "QoE continuum driven HTTP adaptive streaming over multi-client wireless networks", "doi": null, "abstractUrl": "/proceedings-article/icme/2014/06890309/12OmNzICESu", "parentPublication": { "id": "proceedings/icme/2014/4761/0", "title": "2014 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icoin/2016/1724/0/07427114", "title": "Video quality adaptation scheme for improving QoE in HTTP adaptive streaming", "doi": null, "abstractUrl": 
"/proceedings-article/icoin/2016/07427114/12OmNzUxOex", "parentPublication": { "id": "proceedings/icoin/2016/1724/0", "title": "2016 International Conference on Information Networking (ICOIN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2011/348/0/06012028", "title": "Distributed & adaptive HTTP streaming", "doi": null, "abstractUrl": "/proceedings-article/icme/2011/06012028/12OmNzX6cpb", "parentPublication": { "id": "proceedings/icme/2011/348/0", "title": "2011 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcc-euc/2013/5088/0/06832227", "title": "A Rate Adaptation Solution for Distance Education System over HTTP Streaming", "doi": null, "abstractUrl": "/proceedings-article/hpcc-euc/2013/06832227/12OmNzuZUpd", "parentPublication": { "id": "proceedings/hpcc-euc/2013/5088/0", "title": "2013 IEEE International Conference on High Performance Computing and Communications (HPCC) & 2013 IEEE International Conference on Embedded and Ubiquitous Computing (EUC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icii/2019/2977/0/297700a268", "title": "QoE Control for Dynamic Adaptive Video Streaming Over HTTP at Access Point", "doi": null, "abstractUrl": "/proceedings-article/icii/2019/297700a268/1jXvhzMvjX2", "parentPublication": { "id": "proceedings/icii/2019/2977/0", "title": "2019 IEEE International Conference on Industrial Internet (ICII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cI6akLvAuQ", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cJ1dZO3zX2", "doi": "10.1109/VR.2019.8798064", "title": "ExLeap: Minimal and highly available telepresence system creating leaping experience", "normalizedTitle": "ExLeap: Minimal and highly available telepresence system creating leaping experience", "abstract": "We propose &#x201C;ExLeap&#x201D;, a minimal telepresence system that creates leaping experience. Multiple &#x201C;nodes&#x201D; with an omnidirectional camera, mic and speaker transmit the video to clients, and on the client, videos are rendered in 3D space. When moving to another node, by crossfading two videos, the user can feel as if she/he leaps between two places. Also, on each node, the user can talk with people in that place. Each node consists of very simple hardware, so we can put them on multiple places we want to go to. Moreover, because the system can be used 24/7 by multi-user simultaneously and is very easy to use, it creates various types of chances of communications.", "abstracts": [ { "abstractType": "Regular", "content": "We propose &#x201C;ExLeap&#x201D;, a minimal telepresence system that creates leaping experience. Multiple &#x201C;nodes&#x201D; with an omnidirectional camera, mic and speaker transmit the video to clients, and on the client, videos are rendered in 3D space. When moving to another node, by crossfading two videos, the user can feel as if she/he leaps between two places. Also, on each node, the user can talk with people in that place. Each node consists of very simple hardware, so we can put them on multiple places we want to go to. 
Moreover, because the system can be used 24/7 by multi-user simultaneously and is very easy to use, it creates various types of chances of communications.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose “ExLeap”, a minimal telepresence system that creates leaping experience. Multiple “nodes” with an omnidirectional camera, mic and speaker transmit the video to clients, and on the client, videos are rendered in 3D space. When moving to another node, by crossfading two videos, the user can feel as if she/he leaps between two places. Also, on each node, the user can talk with people in that place. Each node consists of very simple hardware, so we can put them on multiple places we want to go to. Moreover, because the system can be used 24/7 by multi-user simultaneously and is very easy to use, it creates various types of chances of communications.", "fno": "08798064", "keywords": [ "Rendering Computer Graphics", "Teleconferencing", "Telecontrol", "Video Communication", "Virtual Reality", "Leaping Experience", "Minimal Telepresence System", "Multiple Nodes", "Omnidirectional Camera", "Ex Leap", "Cameras", "Telepresence", "Robot Sensing Systems", "Three Dimensional Displays", "Urban Areas", "Conferences", "Human Centered Computing", "Computer Supported Cooperative Work", "Interaction Design", "Information Systems", "Web Conferencing" ], "authors": [ { "affiliation": "Research Center for Advanced Science and Technology, The University of Tokyo", "fullName": "Atsushi Izumihara", "givenName": "Atsushi", "surname": "Izumihara", "__typename": "ArticleAuthorType" }, { "affiliation": "Research Center for Advanced Science and Technology, The University of Tokyo", "fullName": "Daisuke Uriu", "givenName": "Daisuke", "surname": "Uriu", "__typename": "ArticleAuthorType" }, { "affiliation": "Research Center for Advanced Science and Technology, The University of Tokyo", "fullName": "Atsushi Hiyama", "givenName": "Atsushi", "surname": "Hiyama", 
"__typename": "ArticleAuthorType" }, { "affiliation": "Research Center for Advanced Science and Technology, The University of Tokyo", "fullName": "Masahiko Inami", "givenName": "Masahiko", "surname": "Inami", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-03-01T00:00:00", "pubType": "proceedings", "pages": "1321-1322", "year": "2019", "issn": null, "isbn": "978-1-7281-1377-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08798230", "articleId": "1cJ0H8UOY1y", "__typename": "AdjacentArticleType" }, "next": { "fno": "08797762", "articleId": "1cJ0ToWEx9K", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/visap/2022/6365/0/636500a053", "title": "The Memory Of Street Hong Kong: History, Culture, Memory And Post-Humanism", "doi": null, "abstractUrl": "/proceedings-article/visap/2022/636500a053/1J7WAjZ2X72", "parentPublication": { "id": "proceedings/visap/2022/6365/0", "title": "2022 IEEE VIS Arts Program (VISAP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a718", "title": "How Far is It? 
Distance Estimation and Reproduction Through a Double 3 Telepresence Robot", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a718/1J7Wq3RYsx2", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icenit/2022/6307/0/630700a247", "title": "Online Education and Teaching Management System under the Background of &#x201C;Internet +&#x201D;", "doi": null, "abstractUrl": "/proceedings-article/icenit/2022/630700a247/1KCSFB4EwN2", "parentPublication": { "id": "proceedings/icenit/2022/6307/0", "title": "2022 International Conference on Education, Network and Information Technology (ICENIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icenit/2022/6307/0/630700a043", "title": "The Design and Implementation of Information-Based Teaching Skills Training Platform for Primary and Secondary School Teachers", "doi": null, "abstractUrl": "/proceedings-article/icenit/2022/630700a043/1KCSKNO0dSU", "parentPublication": { "id": "proceedings/icenit/2022/6307/0", "title": "2022 International Conference on Education, Network and Information Technology (ICENIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icuems/2020/8832/0/09151541", "title": "Analysis of the Urban Morphological Evolution of Baisha Sandbank in Early-Modern Wuhan (1861&#x2013;1949)", "doi": null, "abstractUrl": "/proceedings-article/icuems/2020/09151541/1lRlQQ5elwc", "parentPublication": { "id": "proceedings/icuems/2020/8832/0", "title": "2020 International Conference on Urban Engineering and Management Science (ICUEMS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icuems/2020/8832/0/09151599", "title": "&#x201C;The 
Frontier Political Science&#x201D;: A Model of The Border City Policing Research", "doi": null, "abstractUrl": "/proceedings-article/icuems/2020/09151599/1lRlULsyN3O", "parentPublication": { "id": "proceedings/icuems/2020/8832/0", "title": "2020 International Conference on Urban Engineering and Management Science (ICUEMS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icaie/2020/6659/0/665900a493", "title": "Research on the Employment Intention of Medical Undergraduates in Grassroots Units", "doi": null, "abstractUrl": "/proceedings-article/icaie/2020/665900a493/1oZBK3xCLUQ", "parentPublication": { "id": "proceedings/icaie/2020/6659/0", "title": "2020 International Conference on Artificial Intelligence and Education (ICAIE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/2023/04/09537616", "title": "TIUI: Touching Live Video for Telepresence Operation", "doi": null, "abstractUrl": "/journal/tm/2023/04/09537616/1wTinsFrkju", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccst/2021/4254/0/425400a016", "title": "Analysis and Optimization of Urban Tourism Spatial Behavior Path : &#x2014;Taking Nanjing City as an example", "doi": null, "abstractUrl": "/proceedings-article/iccst/2021/425400a016/1ziPoJYl0cw", "parentPublication": { "id": "proceedings/iccst/2021/4254/0", "title": "2021 International Conference on Culture-oriented Science & Technology (ICCST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/candar/2021/4246/0/424600a127", "title": "Dementia Sign Detection System Using Digital Twin", "doi": null, "abstractUrl": "/proceedings-article/candar/2021/424600a127/1zzquplPK0M", "parentPublication": { "id": "proceedings/candar/2021/4246/0", "title": "2021 Ninth 
International Symposium on Computing and Networking (CANDAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNvAiSpZ", "title": "2015 IEEE Virtual Reality (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNBdruc4", "doi": "10.1109/VR.2015.7223378", "title": "Influence of avatar realism on stressful situation in VR", "normalizedTitle": "Influence of avatar realism on stressful situation in VR", "abstract": "In this paper we present a study of the impact of avatar realism on user experience and performance in stressful immersive virtual environments. We evaluated a stressful and a stress-free environment with partial avatar embodiment under low (iconic) or high (photorealistic) visual fidelity conditions. An experiment with forty participants did not reveal any significant differences between both graphical versions. This first result represents an interesting finding since non realistic avatar and environment representations are faster and more economical to produce while requiring less computational resources.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper we present a study of the impact of avatar realism on user experience and performance in stressful immersive virtual environments. We evaluated a stressful and a stress-free environment with partial avatar embodiment under low (iconic) or high (photorealistic) visual fidelity conditions. An experiment with forty participants did not reveal any significant differences between both graphical versions. This first result represents an interesting finding since non realistic avatar and environment representations are faster and more economical to produce while requiring less computational resources.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper we present a study of the impact of avatar realism on user experience and performance in stressful immersive virtual environments. 
We evaluated a stressful and a stress-free environment with partial avatar embodiment under low (iconic) or high (photorealistic) visual fidelity conditions. An experiment with forty participants did not reveal any significant differences between both graphical versions. This first result represents an interesting finding since non realistic avatar and environment representations are faster and more economical to produce while requiring less computational resources.", "fno": "07223378", "keywords": [ "Avatars", "Games", "Skin", "Visualization", "Three Dimensional Displays", "Virtual Environments", "H 5 1 Information Systems Artificial Augmented And Virtual Realities" ], "authors": [ { "affiliation": "HCI Group, University of Würzburg, Germany", "fullName": "Jean-Luc Lugrin", "givenName": "Jean-Luc", "surname": "Lugrin", "__typename": "ArticleAuthorType" }, { "affiliation": "HCI Group, University of Würzburg, Germany", "fullName": "Maximilian Wiedemann", "givenName": "Maximilian", "surname": "Wiedemann", "__typename": "ArticleAuthorType" }, { "affiliation": "HCI Group, University of Würzburg, Germany", "fullName": "Daniel Bieberstein", "givenName": "Daniel", "surname": "Bieberstein", "__typename": "ArticleAuthorType" }, { "affiliation": "HCI Group, University of Würzburg, Germany", "fullName": "Marc Erich Latoschik", "givenName": "Marc Erich", "surname": "Latoschik", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-03-01T00:00:00", "pubType": "proceedings", "pages": "227-228", "year": "2015", "issn": null, "isbn": "978-1-4799-1727-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07223377", "articleId": "12OmNCcKQFn", "__typename": "AdjacentArticleType" }, "next": { "fno": "07223379", "articleId": "12OmNAWpyrk", "__typename": "AdjacentArticleType" }, "__typename": 
"AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2015/1727/0/07223379", "title": "Avatar anthropomorphism and illusion of body ownership in VR", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223379/12OmNAWpyrk", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892278", "title": "Bodiless embodiment: A descriptive survey of avatar bodily coherence in first-wave consumer VR applications", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892278/12OmNvnwVj4", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504761", "title": "Avatar realism and social interaction quality in virtual reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504761/12OmNzdoMvk", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446318", "title": "Object Size Perception in Immersive Virtual Reality: Avatar Realism Affects the Way We Perceive", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446318/13bd1AITna8", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446229", "title": "Any &#x201C;Body&#x201D; There? 
Avatar Visibility Effects in a Virtual Reality Game", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446229/13bd1fHrlRx", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/05/09714123", "title": "The Impact of Embodiment and Avatar Sizing on Personal Space in Immersive Virtual Environments", "doi": null, "abstractUrl": "/journal/tg/2022/05/09714123/1B0Y0yXxNbG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a772", "title": "Embodiment of an Avatar with Unnatural Arm Movements", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a772/1J7W9fEjd6g", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a141", "title": "Petting a cat helps you incarnate the avatar: Influence of the emotions over embodiment in VR", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a141/1JrRepqALbW", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10049676", "title": "The Impact of Avatar and Environment Congruence on Plausibility, Embodiment, Presence, and the Proteus Effect in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2023/05/10049676/1KYosbnM8q4", "parentPublication": { "id": "trans/tg", "title": 
"IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090457", "title": "Affective Embodiment: The effect of avatar appearance and posture representation on emotions in VR", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090457/1jIxjXwO4HS", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxWcH18", "title": "2014 International Conference on Cyberworlds (CW)", "acronym": "cw", "groupId": "1000175", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNBhHt8t", "doi": "10.1109/CW.2014.21", "title": "User Avatar Association in Virtual Worlds", "normalizedTitle": "User Avatar Association in Virtual Worlds", "abstract": "Human embodiment in virtual worlds is becoming increasingly popular in this day and age. Many researchers argue the need for investigation into human involvement in virtual worlds. We aim to investigate the human involvement in virtual worlds such as Second Life. This is increasingly popular on Second Life which is a 3-D dimensional digital role-playing environment where users create avatars to interact with other users. It is essential to understand if users portray themselves in such environments and understand the complications of using fabricated identities. This paper investigates different attributes of identity users use to build their avatar and how the level of avatar customization corresponds to the degree of user-avatar association. Two experiments were conducted using qualitative methodology to distinguish users' perception of the virtual environment as well as to understand the relationship between the user and its avatar. The findings from the experiments indicated that the relationship between the user and avatar creates an effective interaction between other users. The findings have also indicated that the use of social identities is becoming popular on virtual environments. Public awareness of the use of multiple identities on virtual environments needs to be enhanced to reduce users becoming victims of inappropriate activities in Virtual Worlds.", "abstracts": [ { "abstractType": "Regular", "content": "Human embodiment in virtual worlds is becoming increasingly popular in this day and age. 
Many researchers argue the need for investigation into human involvement in virtual worlds. We aim to investigate the human involvement in virtual worlds such as Second Life. This is increasingly popular on Second Life which is a 3-D dimensional digital role-playing environment where users create avatars to interact with other users. It is essential to understand if users portray themselves in such environments and understand the complications of using fabricated identities. This paper investigates different attributes of identity users use to build their avatar and how the level of avatar customization corresponds to the degree of user-avatar association. Two experiments were conducted using qualitative methodology to distinguish users' perception of the virtual environment as well as to understand the relationship between the user and its avatar. The findings from the experiments indicated that the relationship between the user and avatar creates an effective interaction between other users. The findings have also indicated that the use of social identities is becoming popular on virtual environments. Public awareness of the use of multiple identities on virtual environments needs to be enhanced to reduce users becoming victims of inappropriate activities in Virtual Worlds.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Human embodiment in virtual worlds is becoming increasingly popular in this day and age. Many researchers argue the need for investigation into human involvement in virtual worlds. We aim to investigate the human involvement in virtual worlds such as Second Life. This is increasingly popular on Second Life which is a 3-D dimensional digital role-playing environment where users create avatars to interact with other users. It is essential to understand if users portray themselves in such environments and understand the complications of using fabricated identities. 
This paper investigates different attributes of identity users use to build their avatar and how the level of avatar customization corresponds to the degree of user-avatar association. Two experiments were conducted using qualitative methodology to distinguish users' perception of the virtual environment as well as to understand the relationship between the user and its avatar. The findings from the experiments indicated that the relationship between the user and avatar creates an effective interaction between other users. The findings have also indicated that the use of social identities is becoming popular on virtual environments. Public awareness of the use of multiple identities on virtual environments needs to be enhanced to reduce users becoming victims of inappropriate activities in Virtual Worlds.", "fno": "4677a093", "keywords": [ "Avatars", "Second Life", "Interviews", "Virtual Environments", "Hair", "Color", "Computer Science", "False Self", "Avatar", "Second Life", "Virtual World", "Identity", "Human Embodiment", "Alter Ego", "True Self" ], "authors": [ { "affiliation": null, "fullName": "Aslan Kanamgotov", "givenName": "Aslan", "surname": "Kanamgotov", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Lyzgeo Koshy", "givenName": "Lyzgeo", "surname": "Koshy", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Marc Conrad", "givenName": "Marc", "surname": "Conrad", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Simant Prakoonwit", "givenName": "Simant", "surname": "Prakoonwit", "__typename": "ArticleAuthorType" } ], "idPrefix": "cw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-10-01T00:00:00", "pubType": "proceedings", "pages": "93-100", "year": "2014", "issn": null, "isbn": "978-1-4799-4677-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": 
"4677a085", "articleId": "12OmNBLdKIp", "__typename": "AdjacentArticleType" }, "next": { "fno": "4677a101", "articleId": "12OmNxWcHb1", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cw/2011/4467/0/4467a227", "title": "Avatar Impotence: On 'User Will,' 'Avatar Agency,' and 'System Control' in Second Life", "doi": null, "abstractUrl": "/proceedings-article/cw/2011/4467a227/12OmNBa2iAQ", "parentPublication": { "id": "proceedings/cw/2011/4467/0", "title": "2011 International Conference on Cyberworlds", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2012/4814/0/4814a107", "title": "Immersion in Virtual Worlds - But not Second Life!", "doi": null, "abstractUrl": "/proceedings-article/cw/2012/4814a107/12OmNrAv3P5", "parentPublication": { "id": "proceedings/cw/2012/4814/0", "title": "2012 International Conference on Cyberworlds", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sera/2011/1028/0/06065639", "title": "Death, Social Networks and Virtual Worlds: A Look Into the Digital Afterlife", "doi": null, "abstractUrl": "/proceedings-article/sera/2011/06065639/12OmNwKGAju", "parentPublication": { "id": "proceedings/sera/2011/1028/0", "title": "2011 9th International Conference on Software Engineering Research, Management and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pci/2009/3788/0/3788a207", "title": "Avatars' Appearance and Social Behavior in Online Virtual Worlds", "doi": null, "abstractUrl": "/proceedings-article/pci/2009/3788a207/12OmNwt5sn9", "parentPublication": { "id": "proceedings/pci/2009/3788/0", "title": "2009 13th Panhellenic Conference on Informatics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2011/4467/0/4467a093", "title": "Evaluation of Face 
Recognition Algorithms on Avatar Face Datasets", "doi": null, "abstractUrl": "/proceedings-article/cw/2011/4467a093/12OmNyFU6Ym", "parentPublication": { "id": "proceedings/cw/2011/4467/0", "title": "2011 International Conference on Cyberworlds", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2014/4308/0/4308a027", "title": "Natural vs Artificial Face Classification Using Uniform Local Directional Patterns and Wavelet Uniform Local Directional Patterns", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2014/4308a027/12OmNyQGSqs", "parentPublication": { "id": "proceedings/cvprw/2014/4308/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2013/4892/0/4892a873", "title": "Social Affordances for People with Lifelong Disability through Using Virtual Worlds", "doi": null, "abstractUrl": "/proceedings-article/hicss/2013/4892a873/12OmNzEmFEs", "parentPublication": { "id": "proceedings/hicss/2013/4892/0", "title": "2013 46th Hawaii International Conference on System Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2013/5048/0/5048a724", "title": "Multimodal Affect Recognition in Virtual Worlds: Avatars Mirroring User's Affect", "doi": null, "abstractUrl": "/proceedings-article/acii/2013/5048a724/12OmNzahbSm", "parentPublication": { "id": "proceedings/acii/2013/5048/0", "title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2014/4677/0/4677a321", "title": "New Opportunities for Artistic Practice in Virtual Worlds", "doi": null, "abstractUrl": "/proceedings-article/cw/2014/4677a321/12OmNzahbYN", "parentPublication": { "id": 
"proceedings/cw/2014/4677/0", "title": "2014 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/ex/2010/06/mex2010060017", "title": "An Integrated Framework for Avatar Data Collection from the Virtual World", "doi": null, "abstractUrl": "/magazine/ex/2010/06/mex2010060017/13rRUB7a0WV", "parentPublication": { "id": "mags/ex", "title": "IEEE Intelligent Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAYXWAF", "title": "2016 IEEE Virtual Reality (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNviHKla", "doi": "10.1109/VR.2016.7504689", "title": "The impact of a self-avatar on cognitive load in immersive virtual reality", "normalizedTitle": "The impact of a self-avatar on cognitive load in immersive virtual reality", "abstract": "The use of a self-avatar inside an immersive virtual reality system has been shown to have important effects on presence, interaction and perception of space. Based on studies from linguistics and cognition, in this paper we demonstrate that a self-avatar may aid the participant's cognitive processes while immersed in a virtual reality system. In our study participants were asked to memorise pairs of letters, perform a spatial rotation exercise and then recall the pairs of letters. In a between-subject factor they either had an avatar or not, and in a within-subject factor they were instructed to keep their hands still or not. We found that participants who both had an avatar and were allowed to move their hands had significantly higher letter pair recall. There was no significant difference between the other three conditions. Further analysis showed that participants who were allowed to move their hands, but could not see the self-avatar, usually didn't move their hands or stopped moving their hands after a short while. We argue that an active self-avatar may alleviate the mental load of doing the spatial rotation exercise and thus improve letter recall. The results are further evidence of the importance of an appropriate self-avatar representation in immersive virtual reality.", "abstracts": [ { "abstractType": "Regular", "content": "The use of a self-avatar inside an immersive virtual reality system has been shown to have important effects on presence, interaction and perception of space. 
Based on studies from linguistics and cognition, in this paper we demonstrate that a self-avatar may aid the participant's cognitive processes while immersed in a virtual reality system. In our study participants were asked to memorise pairs of letters, perform a spatial rotation exercise and then recall the pairs of letters. In a between-subject factor they either had an avatar or not, and in a within-subject factor they were instructed to keep their hands still or not. We found that participants who both had an avatar and were allowed to move their hands had significantly higher letter pair recall. There was no significant difference between the other three conditions. Further analysis showed that participants who were allowed to move their hands, but could not see the self-avatar, usually didn't move their hands or stopped moving their hands after a short while. We argue that an active self-avatar may alleviate the mental load of doing the spatial rotation exercise and thus improve letter recall. The results are further evidence of the importance of an appropriate self-avatar representation in immersive virtual reality.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The use of a self-avatar inside an immersive virtual reality system has been shown to have important effects on presence, interaction and perception of space. Based on studies from linguistics and cognition, in this paper we demonstrate that a self-avatar may aid the participant's cognitive processes while immersed in a virtual reality system. In our study participants were asked to memorise pairs of letters, perform a spatial rotation exercise and then recall the pairs of letters. In a between-subject factor they either had an avatar or not, and in a within-subject factor they were instructed to keep their hands still or not. We found that participants who both had an avatar and were allowed to move their hands had significantly higher letter pair recall. 
There was no significant difference between the other three conditions. Further analysis showed that participants who were allowed to move their hands, but could not see the self-avatar, usually didn't move their hands or stopped moving their hands after a short while. We argue that an active self-avatar may alleviate the mental load of doing the spatial rotation exercise and thus improve letter recall. The results are further evidence of the importance of an appropriate self-avatar representation in immersive virtual reality.", "fno": "07504689", "keywords": [ "Cognition", "Avatars", "Solid Modeling", "Virtual Environments", "Visualization", "Estimation", "H 5 1 Multimedia Information Systems Artificial Augmented And Virtual Realities" ], "authors": [ { "affiliation": "University College London, UK", "fullName": "Anthony Steed", "givenName": "Anthony", "surname": "Steed", "__typename": "ArticleAuthorType" }, { "affiliation": "University College London, UK", "fullName": "Ye Pan", "givenName": "Ye", "surname": "Pan", "__typename": "ArticleAuthorType" }, { "affiliation": "University College London, UK", "fullName": "Fiona Zisch", "givenName": "Fiona", "surname": "Zisch", "__typename": "ArticleAuthorType" }, { "affiliation": "University College London, UK", "fullName": "William Steptoe", "givenName": "William", "surname": "Steptoe", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2016-03-01T00:00:00", "pubType": "proceedings", "pages": "67-76", "year": "2016", "issn": "2375-5334", "isbn": "978-1-5090-0836-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07504688", "articleId": "12OmNxxvALV", "__typename": "AdjacentArticleType" }, "next": { "fno": "07504690", "articleId": "12OmNwwd2Jt", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, 
"recommendedArticles": [ { "id": "proceedings/vr/2015/1727/0/07223405", "title": "Wings and flying in immersive VR — Controller type, sound effects and experienced ownership and agency", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223405/12OmNBOCWnu", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2014/6184/0/06948466", "title": "[Poster] Interacting with your own hands in a fully immersive MR system", "doi": null, "abstractUrl": "/proceedings-article/ismar/2014/06948466/12OmNrMHOkY", "parentPublication": { "id": "proceedings/ismar/2014/6184/0", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892275", "title": "Socially immersive avatar-based communication", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892275/12OmNwEJ0VR", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892241", "title": "Prism aftereffects for throwing with a self-avatar in an immersive virtual environment", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892241/12OmNxy4N0w", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446318", "title": "Object Size Perception in Immersive Virtual Reality: Avatar Realism Affects the Way We Perceive", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446318/13bd1AITna8", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on 
Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/05/09714123", "title": "The Impact of Embodiment and Avatar Sizing on Personal Space in Immersive Virtual Environments", "doi": null, "abstractUrl": "/journal/tg/2022/05/09714123/1B0Y0yXxNbG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10049676", "title": "The Impact of Avatar and Environment Congruence on Plausibility, Embodiment, Presence, and the Proteus Effect in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2023/05/10049676/1KYosbnM8q4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/05/08998371", "title": "The Impact of a Self-Avatar, Hand Collocation, and Hand Proximity on Embodiment and Stroop Interference", "doi": null, "abstractUrl": "/journal/tg/2020/05/08998371/1hrXiia6v9C", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090453", "title": "Perception of Walking Self-body Avatar Enhances Virtual-walking Sensation", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090453/1jIxoojmMy4", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/04/09200517", "title": "On the Plausibility of Virtual Body Animation Features in Virtual Reality", "doi": null, "abstractUrl": 
"/journal/tg/2022/04/09200517/1ndVuuNfI64", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxV4itF", "title": "2017 IEEE Virtual Reality (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNwEJ0VR", "doi": "10.1109/VR.2017.7892275", "title": "Socially immersive avatar-based communication", "normalizedTitle": "Socially immersive avatar-based communication", "abstract": "In this paper, we present SIAM-C, an avatar-mediated communication platform to study socially immersive interaction in virtual environments. The proposed system is capable of tracking, transmitting, representing body motion, facial expressions, and voice via virtual avatars and inherits the transmission of human behaviors that are available in real-life social interactions. Users are immersed using active stereoscopic rendering projected onto a life-size projection plane, utilizing the concept of “fish tank” virtual reality (VR). Our prototype connects two separate rooms and allows for socially immersive avatar-mediated communication in VR.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we present SIAM-C, an avatar-mediated communication platform to study socially immersive interaction in virtual environments. The proposed system is capable of tracking, transmitting, representing body motion, facial expressions, and voice via virtual avatars and inherits the transmission of human behaviors that are available in real-life social interactions. Users are immersed using active stereoscopic rendering projected onto a life-size projection plane, utilizing the concept of “fish tank” virtual reality (VR). Our prototype connects two separate rooms and allows for socially immersive avatar-mediated communication in VR.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we present SIAM-C, an avatar-mediated communication platform to study socially immersive interaction in virtual environments. 
The proposed system is capable of tracking, transmitting, representing body motion, facial expressions, and voice via virtual avatars and inherits the transmission of human behaviors that are available in real-life social interactions. Users are immersed using active stereoscopic rendering projected onto a life-size projection plane, utilizing the concept of “fish tank” virtual reality (VR). Our prototype connects two separate rooms and allows for socially immersive avatar-mediated communication in VR.", "fno": "07892275", "keywords": [ "Tracking", "Avatars", "Virtual Environments", "Stereo Image Processing", "Three Dimensional Displays", "Sensors", "H 5 1 Multimedia Information Systems Artificial Augmented And Virtual Realities", "H 4 3 Communications Applications" ], "authors": [ { "affiliation": "University of Wurzburg, Germany", "fullName": "Daniel Roth", "givenName": "Daniel", "surname": "Roth", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Cologne, Germany", "fullName": "Kristoffer Waldow", "givenName": "Kristoffer", "surname": "Waldow", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Wurzburg, Germany", "fullName": "Marc Erich Latoschik", "givenName": "Marc Erich", "surname": "Latoschik", "__typename": "ArticleAuthorType" }, { "affiliation": "TH Koln, Germany", "fullName": "Arnulph Fuhrmann", "givenName": "Arnulph", "surname": "Fuhrmann", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Cologne, Germany", "fullName": "Gary Bente", "givenName": "Gary", "surname": "Bente", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-01-01T00:00:00", "pubType": "proceedings", "pages": "259-260", "year": "2017", "issn": "2375-5334", "isbn": "978-1-5090-6647-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07892274", 
"articleId": "12OmNqH9hid", "__typename": "AdjacentArticleType" }, "next": { "fno": "07892276", "articleId": "12OmNAQJzMG", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2016/0836/0/07504735", "title": "FaceBo: Real-time face and body tracking for faithful avatar synthesis", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504735/12OmNBRbkpf", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504689", "title": "The impact of a self-avatar on cognitive load in immersive virtual reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504689/12OmNviHKla", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892241", "title": "Prism aftereffects for throwing with a self-avatar in an immersive virtual environment", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892241/12OmNxy4N0w", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504761", "title": "Avatar realism and social interaction quality in virtual reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504761/12OmNzdoMvk", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446318", "title": "Object Size Perception in Immersive Virtual Reality: Avatar Realism Affects the Way We Perceive", "doi": null, "abstractUrl": 
"/proceedings-article/vr/2018/08446318/13bd1AITna8", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446261", "title": "The Influence of Avatar Representation and Behavior on Communication in Social Immersive Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446261/13bd1gCd7T2", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a001", "title": "A Cardboard-Based Virtual Reality Study on Self-Avatar Appearance and Breathing", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a001/1CJdXjsLKBG", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089645", "title": "Comparative Evaluation of Viewing and Self-Representation on Passability Affordances to a Realistic Sliding Doorway in Real and Immersive Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089645/1jIx9zwn7SE", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090630", "title": "Embodied Realistic Avatar System with Body Motions and Facial Expressions for Communication in Virtual Reality Applications", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090630/1jIxtbZL30Y", 
"parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09382845", "title": "The Influence of Avatar Representation on Interpersonal Communication in Virtual Social Environments", "doi": null, "abstractUrl": "/journal/tg/2021/05/09382845/1saZq7bIPUQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxE2mWh", "title": "2013 IEEE Virtual Reality (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2013", "__typename": "ProceedingType" }, "article": { "id": "12OmNxRWI3d", "doi": "10.1109/VR.2013.6549379", "title": "Head motion animation using avatar gaze space", "normalizedTitle": "Head motion animation using avatar gaze space", "abstract": "Animation plays a key role in adding realism to any graphical environment. In this work we present a technique for animating multiple avatars in a virtual environment designed to support remote collaboration between distributed work teams in which users are represented by avatars. There will be long periods of time when the user is not actively controlling the avatar and working on his official task. We need to animate his avatar with a `working-at-desk' animation that should be non-looping and sufficiently random for a single avatar as well as between multiple avatars to appear realistic. We present a technique for generating multiple head motions using (a) nonlinear dimensionality reduction and (b) gaze space images to control the motion. Our technique can automatically generate long sequences of motion without any user intervention. We present results from synthetic data.", "abstracts": [ { "abstractType": "Regular", "content": "Animation plays a key role in adding realism to any graphical environment. In this work we present a technique for animating multiple avatars in a virtual environment designed to support remote collaboration between distributed work teams in which users are represented by avatars. There will be long periods of time when the user is not actively controlling the avatar and working on his official task. We need to animate his avatar with a `working-at-desk' animation that should be non-looping and sufficiently random for a single avatar as well as between multiple avatars to appear realistic. 
We present a technique for generating multiple head motions using (a) nonlinear dimensionality reduction and (b) gaze space images to control the motion. Our technique can automatically generate long sequences of motion without any user intervention. We present results from synthetic data.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Animation plays a key role in adding realism to any graphical environment. In this work we present a technique for animating multiple avatars in a virtual environment designed to support remote collaboration between distributed work teams in which users are represented by avatars. There will be long periods of time when the user is not actively controlling the avatar and working on his official task. We need to animate his avatar with a `working-at-desk' animation that should be non-looping and sufficiently random for a single avatar as well as between multiple avatars to appear realistic. We present a technique for generating multiple head motions using (a) nonlinear dimensionality reduction and (b) gaze space images to control the motion. Our technique can automatically generate long sequences of motion without any user intervention. We present results from synthetic data.", "fno": "06549379", "keywords": [ "Avatars", "Animation", "Head", "Semantics", "Keyboards", "Virtual Environments", "K 6 1 Virtual Digital Characters Three Dimensional Graphics And Realism Animation" ], "authors": [ { "affiliation": "Indian Inst. of Technol., Kanpur, Kanpur, India", "fullName": "M. S. Ramaiah", "givenName": "M. S.", "surname": "Ramaiah", "__typename": "ArticleAuthorType" }, { "affiliation": "Indian Inst. 
of Technol., Delhi, Delhi, India", "fullName": "Ankit Vijay", "givenName": "Ankit", "surname": "Vijay", "__typename": "ArticleAuthorType" }, { "affiliation": "Tata Consultancy Services, India", "fullName": "Geetika Sharma", "givenName": "Geetika", "surname": "Sharma", "__typename": "ArticleAuthorType" }, { "affiliation": "Indian Inst. of Technol., Kanpur, Kanpur, India", "fullName": "Amitabha Mukerjee", "givenName": "Amitabha", "surname": "Mukerjee", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2013-03-01T00:00:00", "pubType": "proceedings", "pages": "95-96", "year": "2013", "issn": "1087-8270", "isbn": "978-1-4673-4795-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06549378", "articleId": "12OmNBvkdnR", "__typename": "AdjacentArticleType" }, "next": { "fno": "06549380", "articleId": "12OmNxXCGEs", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cisis/2012/4687/0/4687a741", "title": "An Avatar Motion Generation Method Based on Inverse Kinematics and Interactive Evolutionary Computation", "doi": null, "abstractUrl": "/proceedings-article/cisis/2012/4687a741/12OmNwO5LZL", "parentPublication": { "id": "proceedings/cisis/2012/4687/0", "title": "2012 Sixth International Conference on Complex, Intelligent, and Software Intensive Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892241", "title": "Prism aftereffects for throwing with a self-avatar in an immersive virtual environment", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892241/12OmNxy4N0w", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "trans/tg/2014/04/ttg201404654", "title": "Toward \"Pseudo-Haptic Avatars\": Modifying the Visual Animation of Self-Avatar Can Simulate the Perception of Weight Lifting", "doi": null, "abstractUrl": "/journal/tg/2014/04/ttg201404654/13rRUyft7D4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2018/9269/0/926900a237", "title": "Integrating Biomechanical and Animation Motion Capture Methods in the Production of Participant Specific, Scaled Avatars", "doi": null, "abstractUrl": "/proceedings-article/aivr/2018/926900a237/17D45XeKgqk", "parentPublication": { "id": "proceedings/aivr/2018/9269/0", "title": "2018 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a772", "title": "Embodiment of an Avatar with Unnatural Arm Movements", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a772/1J7W9fEjd6g", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a429", "title": "Real-time Expressive Avatar Animation Generation based on Monocular Videos", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a429/1J7Wj0kJrJm", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/05/08998352", "title": "Using Facial 
Animation to Increase the Enfacement Illusion and Avatar Self-Identification", "doi": null, "abstractUrl": "/journal/tg/2020/05/08998352/1hpPCCB7Bte", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/04/09200517", "title": "On the Plausibility of Virtual Body Animation Features in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2022/04/09200517/1ndVuuNfI64", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2020/7463/0/746300a091", "title": "MoveBox: Democratizing MoCap for the Microsoft Rocketbox Avatar Library", "doi": null, "abstractUrl": "/proceedings-article/aivr/2020/746300a091/1qpzzqGXwA0", "parentPublication": { "id": "proceedings/aivr/2020/7463/0", "title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800a128", "title": "AlterEcho: Loose Avatar-Streamer Coupling for Expressive VTubing", "doi": null, "abstractUrl": "/proceedings-article/ismar/2021/015800a128/1yeCWKEosp2", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNCcbEdc", "title": "2016 International Conference on Informatics, Electronics and Vision (ICIEV)", "acronym": "iciev", "groupId": "1802578", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNyRg4fm", "doi": "10.1109/ICIEV.2016.7759995", "title": "Humanoid avatar mentor: Integrating VLE and traditional classroom environment for distance learning", "normalizedTitle": "Humanoid avatar mentor: Integrating VLE and traditional classroom environment for distance learning", "abstract": "On this era of virtual network, the concept of Virtual Learning Environment (VLE) has been applied to many educational institution and having affirmative feedback in application level. Meanwhile, it has now become possible to control a humanoid robot by human brain as well as body. Though VLE is gaining popularity, in some cases the need for traditional classroom environment can never be denied. Henceforth, in this paper we propose for a virtual environment based teaching method by applying the concept of human avatar in traditional classroom environment. Incorporating the idea of VLE, humanoid robot avatar mentor and traditional classroom, we design for an humanoid robot avatar controlled virtual learning system in traditional classroom environment. Avatar robots integrated with one human body can take multiple classes at multiple places in virtually connected class rooms. This concept can play a vital role to promote distant learning in a fruitful way.", "abstracts": [ { "abstractType": "Regular", "content": "On this era of virtual network, the concept of Virtual Learning Environment (VLE) has been applied to many educational institution and having affirmative feedback in application level. Meanwhile, it has now become possible to control a humanoid robot by human brain as well as body. 
Though VLE is gaining popularity, in some cases the need for traditional classroom environment can never be denied. Henceforth, in this paper we propose for a virtual environment based teaching method by applying the concept of human avatar in traditional classroom environment. Incorporating the idea of VLE, humanoid robot avatar mentor and traditional classroom, we design for an humanoid robot avatar controlled virtual learning system in traditional classroom environment. Avatar robots integrated with one human body can take multiple classes at multiple places in virtually connected class rooms. This concept can play a vital role to promote distant learning in a fruitful way.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "On this era of virtual network, the concept of Virtual Learning Environment (VLE) has been applied to many educational institution and having affirmative feedback in application level. Meanwhile, it has now become possible to control a humanoid robot by human brain as well as body. Though VLE is gaining popularity, in some cases the need for traditional classroom environment can never be denied. Henceforth, in this paper we propose for a virtual environment based teaching method by applying the concept of human avatar in traditional classroom environment. Incorporating the idea of VLE, humanoid robot avatar mentor and traditional classroom, we design for an humanoid robot avatar controlled virtual learning system in traditional classroom environment. Avatar robots integrated with one human body can take multiple classes at multiple places in virtually connected class rooms. 
This concept can play a vital role to promote distant learning in a fruitful way.", "fno": "07759995", "keywords": [ "Avatars", "Humanoid Robots", "Electronic Learning", "Virtual Environments", "Face", "Distant Learning", "VLE", "Humanoid Robot", "Humanoid Robot Avatar" ], "authors": [ { "affiliation": "Department of Electrical and Computer Engineering, Northsouth University Dhaka, Bangladesh", "fullName": "Sara Binte Zinnat", "givenName": "Sara Binte", "surname": "Zinnat", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Computer Science and Engineering, Primeasia University Dhaka, Bangladesh", "fullName": "Deen Md. Abdullah", "givenName": "Deen Md.", "surname": "Abdullah", "__typename": "ArticleAuthorType" } ], "idPrefix": "iciev", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2016-05-01T00:00:00", "pubType": "proceedings", "pages": "199-203", "year": "2016", "issn": null, "isbn": "978-1-5090-1269-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07759994", "articleId": "12OmNzZEAIp", "__typename": "AdjacentArticleType" }, "next": { "fno": "07759996", "articleId": "12OmNwHQB3v", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cw/2008/3381/0/3381a561", "title": "Virtual-RE: A Humanoid Robotic Soccer Simulator", "doi": null, "abstractUrl": "/proceedings-article/cw/2008/3381a561/12OmNBtl1Hn", "parentPublication": { "id": "proceedings/cw/2008/3381/0", "title": "2008 International Conference on Cyberworlds", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2013/4795/0/06549379", "title": "Head motion animation using avatar gaze space", "doi": null, "abstractUrl": "/proceedings-article/vr/2013/06549379/12OmNxRWI3d", "parentPublication": { "id": "proceedings/vr/2013/4795/0", "title": 
"2013 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fbit/2007/2999/0/29990628", "title": "Usability Evaluation of Humanoid-Animation Avatar with Physiological Signals", "doi": null, "abstractUrl": "/proceedings-article/fbit/2007/29990628/12OmNzVoBV7", "parentPublication": { "id": "proceedings/fbit/2007/2999/0", "title": "2007 Frontiers in the Convergence of Bioscience and Information Technologies (FBIT '07)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2007/1016/0/04284819", "title": "Real-Time Humanoid Avatar for Multimodal Human-Machine Interaction", "doi": null, "abstractUrl": "/proceedings-article/icme/2007/04284819/12OmNzkMlMm", "parentPublication": { "id": "proceedings/icme/2007/1016/0", "title": "2007 International Conference on Multimedia & Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446318", "title": "Object Size Perception in Immersive Virtual Reality: Avatar Realism Affects the Way We Perceive", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446318/13bd1AITna8", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/04/ttg2013040583", "title": "Human Tails: Ownership and Control of Extended Humanoid Avatars", "doi": null, "abstractUrl": "/journal/tg/2013/04/ttg2013040583/13rRUxYrbUF", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ci/2013/01/06357232", "title": "A Neurally Controlled Computer Game Avatar With Humanlike Behavior", "doi": null, "abstractUrl": 
"/journal/ci/2013/01/06357232/13rRUzpzeEr", "parentPublication": { "id": "trans/ci", "title": "IEEE Transactions on Computational Intelligence and AI in Games", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a650", "title": "Emotional Avatars: Effect of Uncanniness in Identifying Emotions using Avatar Expressions", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a650/1CJdQj37aw0", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icict/2022/6960/0/696000a153", "title": "Mixed reality (MR) Enabled Proprio and Teleoperation of a Humanoid Robot for Paraplegic Patients", "doi": null, "abstractUrl": "/proceedings-article/icict/2022/696000a153/1FJ5bdmciJO", "parentPublication": { "id": "proceedings/icict/2022/6960/0", "title": "2022 5th International Conference on Information and Computer Technologies (ICICT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090630", "title": "Embodied Realistic Avatar System with Body Motions and Facial Expressions for Communication in Virtual Reality Applications", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090630/1jIxtbZL30Y", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "13bd1eJgoia", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "13bd1AITna8", "doi": "10.1109/VR.2018.8446318", "title": "Object Size Perception in Immersive Virtual Reality: Avatar Realism Affects the Way We Perceive", "normalizedTitle": "Object Size Perception in Immersive Virtual Reality: Avatar Realism Affects the Way We Perceive", "abstract": "How does the representation of an embodied avatar influence the way in which a human perceives the scale of a virtual environment? It has been shown that the scale of the external environment is perceived relative to the size of one's body. However, the influence of avatar realism on the perceived scale has not been investigated, despite the fact that it is common to embody avatars of various representations, from iconic to realistic. This study examined how avatar realism would affect perceived graspable object sizes as the size of the avatar hand changes. In the experiment, we manipulated the realism (high, medium, and low) and size (veridical and enlarged) of the avatar hand, and measured the perceived size of a cube. The results showed that the size of the cube was perceived to be smaller when the avatar hand was enlarged for all degrees of realism of the hand. However, the enlargement of the avatar hand had a greater influence on the perceived cube size for the highly realistic avatar than for the medium-level and low-level realism conditions. 
This study shed new light on the importance of the avatar representation in a three-dimensional user interface field, in how it can affect the manner in which we perceive the scale of a virtual environment.", "abstracts": [ { "abstractType": "Regular", "content": "How does the representation of an embodied avatar influence the way in which a human perceives the scale of a virtual environment? It has been shown that the scale of the external environment is perceived relative to the size of one's body. However, the influence of avatar realism on the perceived scale has not been investigated, despite the fact that it is common to embody avatars of various representations, from iconic to realistic. This study examined how avatar realism would affect perceived graspable object sizes as the size of the avatar hand changes. In the experiment, we manipulated the realism (high, medium, and low) and size (veridical and enlarged) of the avatar hand, and measured the perceived size of a cube. The results showed that the size of the cube was perceived to be smaller when the avatar hand was enlarged for all degrees of realism of the hand. However, the enlargement of the avatar hand had a greater influence on the perceived cube size for the highly realistic avatar than for the medium-level and low-level realism conditions. This study shed new light on the importance of the avatar representation in a three-dimensional user interface field, in how it can affect the manner in which we perceive the scale of a virtual environment.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "How does the representation of an embodied avatar influence the way in which a human perceives the scale of a virtual environment? It has been shown that the scale of the external environment is perceived relative to the size of one's body. 
However, the influence of avatar realism on the perceived scale has not been investigated, despite the fact that it is common to embody avatars of various representations, from iconic to realistic. This study examined how avatar realism would affect perceived graspable object sizes as the size of the avatar hand changes. In the experiment, we manipulated the realism (high, medium, and low) and size (veridical and enlarged) of the avatar hand, and measured the perceived size of a cube. The results showed that the size of the cube was perceived to be smaller when the avatar hand was enlarged for all degrees of realism of the hand. However, the enlargement of the avatar hand had a greater influence on the perceived cube size for the highly realistic avatar than for the medium-level and low-level realism conditions. This study shed new light on the importance of the avatar representation in a three-dimensional user interface field, in how it can affect the manner in which we perceive the scale of a virtual environment.", "fno": "08446318", "keywords": [ "Avatars", "User Interfaces", "Virtual Reality", "Visual Perception", "Virtual Environment", "Avatar Realism", "Low Level Realism Conditions", "Avatar Representation", "Object Size Perception", "Immersive Virtual Reality", "Realistic Avatar", "Three Dimensional User Interface", "Avatars", "Estimation", "Hysteresis", "Skin", "Virtual Environments", "Human Centered Computing Virtual Reality" ], "authors": [ { "affiliation": "The University of Tokyo", "fullName": "Nami Ogawa", "givenName": "Nami", "surname": "Ogawa", "__typename": "ArticleAuthorType" }, { "affiliation": "The University of Tokyo / JST PREST", "fullName": "Takuji Narumi", "givenName": "Takuji", "surname": "Narumi", "__typename": "ArticleAuthorType" }, { "affiliation": "The University of Tokyo", "fullName": "Michitaka Hirose", "givenName": "Michitaka", "surname": "Hirose", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, 
"showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-03-01T00:00:00", "pubType": "proceedings", "pages": "647-648", "year": "2018", "issn": null, "isbn": "978-1-5386-3365-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08446225", "articleId": "13bd1sx4Zt8", "__typename": "AdjacentArticleType" }, "next": { "fno": "08446445", "articleId": "13bd1AITnaH", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2015/1727/0/07223379", "title": "Avatar anthropomorphism and illusion of body ownership in VR", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223379/12OmNAWpyrk", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2015/1727/0/07223378", "title": "Influence of avatar realism on stressful situation in VR", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223378/12OmNBdruc4", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2015/1727/0/07223377", "title": "Avatar embodiment realism and virtual fitness training", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223377/12OmNCcKQFn", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504689", "title": "The impact of a self-avatar on cognitive load in immersive virtual reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504689/12OmNviHKla", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 
IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892275", "title": "Socially immersive avatar-based communication", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892275/12OmNwEJ0VR", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2013/4795/0/06549379", "title": "Head motion animation using avatar gaze space", "doi": null, "abstractUrl": "/proceedings-article/vr/2013/06549379/12OmNxRWI3d", "parentPublication": { "id": "proceedings/vr/2013/4795/0", "title": "2013 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504731", "title": "The effect of realism on the virtual hand illusion", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504731/12OmNxu6p9n", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504761", "title": "Avatar realism and social interaction quality in virtual reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504761/12OmNzdoMvk", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798040", "title": "Virtual Hand Realism Affects Object Size Perception in Body-Based Scaling", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798040/1cJ14CI2Jsk", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a127", "title": "Evidence for a Relationship Between Self-Avatar Fixations and Perceived Avatar Similarity within Low-Cost Virtual Reality Embodiment", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a127/1tnXDDh8sqk", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "13bd1eJgoia", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "13bd1gCd7T2", "doi": "10.1109/VR.2018.8446261", "title": "The Influence of Avatar Representation and Behavior on Communication in Social Immersive Virtual Environments", "normalizedTitle": "The Influence of Avatar Representation and Behavior on Communication in Social Immersive Virtual Environments", "abstract": "Virtual reality applications have begun to offer great potential for communication in recent years. Creating an immersive virtual social environment that simulates a real social environment requires providing users with communication cues such as visual, verbal, and nonverbal cues to increase their sense of inhabiting the virtual world. In this work, we will investigate the influence of avatar representation and behavior on communication in an immersive, multiuser, same-place virtual environment by comparing three conditions of avatar representation: video see-through, scanned realistic avatar, and no-avatar representations. Subjective and objective measurements will be used to describe participants' observations and track their movement behavior to ascertain the effect of avatar representations on communication, based on personal presence, social presence, and trustworthiness.", "abstracts": [ { "abstractType": "Regular", "content": "Virtual reality applications have begun to offer great potential for communication in recent years. Creating an immersive virtual social environment that simulates a real social environment requires providing users with communication cues such as visual, verbal, and nonverbal cues to increase their sense of inhabiting the virtual world. 
In this work, we will investigate the influence of avatar representation and behavior on communication in an immersive, multiuser, same-place virtual environment by comparing three conditions of avatar representation: video see-through, scanned realistic avatar, and no-avatar representations. Subjective and objective measurements will be used to describe participants' observations and track their movement behavior to ascertain the effect of avatar representations on communication, based on personal presence, social presence, and trustworthiness.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Virtual reality applications have begun to offer great potential for communication in recent years. Creating an immersive virtual social environment that simulates a real social environment requires providing users with communication cues such as visual, verbal, and nonverbal cues to increase their sense of inhabiting the virtual world. In this work, we will investigate the influence of avatar representation and behavior on communication in an immersive, multiuser, same-place virtual environment by comparing three conditions of avatar representation: video see-through, scanned realistic avatar, and no-avatar representations. 
Subjective and objective measurements will be used to describe participants' observations and track their movement behavior to ascertain the effect of avatar representations on communication, based on personal presence, social presence, and trustworthiness.", "fno": "08446261", "keywords": [ "Avatars", "Avatar Representation", "Same Place Virtual Environment", "Scanned Realistic Avatar", "No Avatar Representations", "Social Presence", "Social Immersive Virtual Environments", "Virtual Reality Applications", "Immersive Virtual Social Environment", "Communication Cues", "Visual Cues", "Nonverbal Cues", "Virtual World", "Verbal Cues", "Personal Presence", "Trustworthiness", "Avatars", "Three Dimensional Displays", "Virtual Environments", "Cameras", "Real Time Systems", "Current Measurement", "Avatar", "Communication", "Personal Presence", "Social Presence", "Trustworthiness", "Virtual Reality" ], "authors": [ { "affiliation": "Department of Computer Science and Engineering, University of Minnesota Twin Cities, Minneapolis, USA", "fullName": "Sahar A. 
Aseeri", "givenName": "Sahar A.", "surname": "Aseeri", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Computer Science and Engineering, University of Minnesota Twin Cities, Minneapolis, USA", "fullName": "Victoria Interrante", "givenName": "Victoria", "surname": "Interrante", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-03-01T00:00:00", "pubType": "proceedings", "pages": "823-824", "year": "2018", "issn": null, "isbn": "978-1-5386-3365-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08446292", "articleId": "13bd1fZBGcO", "__typename": "AdjacentArticleType" }, "next": { "fno": "08446426", "articleId": "13bd1fHrlRZ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2015/1727/0/07223378", "title": "Influence of avatar realism on stressful situation in VR", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223378/12OmNBdruc4", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504689", "title": "The impact of a self-avatar on cognitive load in immersive virtual reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504689/12OmNviHKla", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892275", "title": "Socially immersive avatar-based communication", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892275/12OmNwEJ0VR", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual 
Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504775", "title": "Evaluation of the effect of a virtual avatar's representation on distance perception in immersive virtual environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504775/12OmNwpXROu", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892241", "title": "Prism aftereffects for throwing with a self-avatar in an immersive virtual environment", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892241/12OmNxy4N0w", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504761", "title": "Avatar realism and social interaction quality in virtual reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504761/12OmNzdoMvk", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446318", "title": "Object Size Perception in Immersive Virtual Reality: Avatar Realism Affects the Way We Perceive", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446318/13bd1AITna8", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/05/09714123", "title": "The Impact of Embodiment and Avatar Sizing on Personal Space in Immersive Virtual Environments", "doi": null, "abstractUrl": "/journal/tg/2022/05/09714123/1B0Y0yXxNbG", 
"parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090630", "title": "Embodied Realistic Avatar System with Body Motions and Facial Expressions for Communication in Virtual Reality Applications", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090630/1jIxtbZL30Y", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09382845", "title": "The Influence of Avatar Representation on Interpersonal Communication in Virtual Social Environments", "doi": null, "abstractUrl": "/journal/tg/2021/05/09382845/1saZq7bIPUQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNs4S8wv", "title": "2014 IEEE Symposium on 3D User Interfaces (3DUI)", "acronym": "3dui", "groupId": "1001623", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNqFJhV7", "doi": "10.1109/3DUI.2014.6798870", "title": "Poster: Immersive point cloud virtual environments", "normalizedTitle": "Poster: Immersive point cloud virtual environments", "abstract": "Today's three-dimensional (3D) virtual environments (VEs) are usually based on textured polygonal 3D models, which represent the appearance and geometry of the virtual world. However, some application domains require other graphical paradigms, which are currently not adequately addressed by 3D user interfaces. We introduce a novel approach for a technical human-robot telepresence setup that allows a human observer to explore a VE, which is a 3D reconstruction of the real world based on point clouds. Such point cloud virtual environments (PCVEs) represent the external environment, and are usually acquired by 3D scanners. We present an application scenario, in which a mobile robot captures 3D scans of a terrestrial environment, which are automatically registered to a coherent PCVE. This virtual 3D reconstruction is displayed in an immersive virtual environment (IVE) in which a user can explore the PCVE. We explain and describe the technical setup, which opens up new vistas of presenting a VE as points rather than a polygonal representation.", "abstracts": [ { "abstractType": "Regular", "content": "Today's three-dimensional (3D) virtual environments (VEs) are usually based on textured polygonal 3D models, which represent the appearance and geometry of the virtual world. However, some application domains require other graphical paradigms, which are currently not adequately addressed by 3D user interfaces. 
We introduce a novel approach for a technical human-robot telepresence setup that allows a human observer to explore a VE, which is a 3D reconstruction of the real world based on point clouds. Such point cloud virtual environments (PCVEs) represent the external environment, and are usually acquired by 3D scanners. We present an application scenario, in which a mobile robot captures 3D scans of a terrestrial environment, which are automatically registered to a coherent PCVE. This virtual 3D reconstruction is displayed in an immersive virtual environment (IVE) in which a user can explore the PCVE. We explain and describe the technical setup, which opens up new vistas of presenting a VE as points rather than a polygonal representation.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Today's three-dimensional (3D) virtual environments (VEs) are usually based on textured polygonal 3D models, which represent the appearance and geometry of the virtual world. However, some application domains require other graphical paradigms, which are currently not adequately addressed by 3D user interfaces. We introduce a novel approach for a technical human-robot telepresence setup that allows a human observer to explore a VE, which is a 3D reconstruction of the real world based on point clouds. Such point cloud virtual environments (PCVEs) represent the external environment, and are usually acquired by 3D scanners. We present an application scenario, in which a mobile robot captures 3D scans of a terrestrial environment, which are automatically registered to a coherent PCVE. This virtual 3D reconstruction is displayed in an immersive virtual environment (IVE) in which a user can explore the PCVE. 
We explain and describe the technical setup, which opens up new vistas of presenting a VE as points rather than a polygonal representation.", "fno": "06798870", "keywords": [ "Three Dimensional Displays", "Rendering Computer Graphics", "Legged Locomotion", "Virtual Environments", "User Interfaces" ], "authors": [ { "affiliation": "Department of Computer Science, University of Würzburg, Germany", "fullName": "Gerd Bruder", "givenName": "Gerd", "surname": "Bruder", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Computer Science, University of Würzburg, Germany", "fullName": "Frank Steinicke", "givenName": "Frank", "surname": "Steinicke", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Computer Science, University of Würzburg, Germany", "fullName": "Andreas Nuchter", "givenName": "Andreas", "surname": "Nuchter", "__typename": "ArticleAuthorType" } ], "idPrefix": "3dui", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-03-01T00:00:00", "pubType": "proceedings", "pages": "161-162", "year": "2014", "issn": null, "isbn": "978-1-4799-3624-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06798869", "articleId": "12OmNAWpyuN", "__typename": "AdjacentArticleType" }, "next": { "fno": "06798871", "articleId": "12OmNqJHFAm", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2007/0905/0/04161022", "title": "Walking into Images: Virtual Plane Mosaics for Plenoptic Modeling", "doi": null, "abstractUrl": "/proceedings-article/vr/2007/04161022/12OmNAT0mSD", "parentPublication": { "id": "proceedings/vr/2007/0905/0", "title": "2007 IEEE Virtual Reality Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2014/2871/0/06802054", "title": "Time perception during walking 
in virtual environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2014/06802054/12OmNBpmDG4", "parentPublication": { "id": "proceedings/vr/2014/2871/0", "title": "2014 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2013/4795/0/06549377", "title": "Collision prediction and prevention in a simultaneous two-user immersive virtual environment", "doi": null, "abstractUrl": "/proceedings-article/vr/2013/06549377/12OmNCxbXEj", "parentPublication": { "id": "proceedings/vr/2013/4795/0", "title": "2013 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2016/0842/0/07460037", "title": "An initial exploration of a multi-sensory design space: Tactile support for walking in immersive virtual environments", "doi": null, "abstractUrl": "/proceedings-article/3dui/2016/07460037/12OmNrYCXTx", "parentPublication": { "id": "proceedings/3dui/2016/0842/0", "title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dcve/2014/5217/0/07160931", "title": "Measuring the collaboration degree in immersive 3D collaborative virtual environments", "doi": null, "abstractUrl": "/proceedings-article/3dcve/2014/07160931/12OmNvjQ8QS", "parentPublication": { "id": "proceedings/3dcve/2014/5217/0", "title": "2014 International Workshop on Collaborative Virtual Environments (3DCVE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892303", "title": "Object location memory error in virtual and real environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892303/12OmNx7ouWn", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/3dui/2014/3624/0/06798865", "title": "Poster: Guided tour creation in immersive virtual environments", "doi": null, "abstractUrl": "/proceedings-article/3dui/2014/06798865/12OmNxdDFHO", "parentPublication": { "id": "proceedings/3dui/2014/3624/0", "title": "2014 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504752", "title": "Disguising rotational gain for redirected walking in virtual reality: Effect of visual density", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504752/12OmNyr8YkS", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2008/2047/0/04476610", "title": "Poster: Generic Redirected Walking &#x00026; Dynamic Passive Haptics: Evaluation and Implications for Virtual Locomotion Interfaces", "doi": null, "abstractUrl": "/proceedings-article/3dui/2008/04476610/12OmNzlD9i9", "parentPublication": { "id": "proceedings/3dui/2008/2047/0", "title": "2008 IEEE Symposium on 3D User Interfaces", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798029", "title": "Studying the Mental Effort in Virtual Versus Real Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798029/1cJ0I9M7tVm", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAYXWAF", "title": "2016 IEEE Virtual Reality (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNxYtu4K", "doi": "10.1109/VR.2016.7504745", "title": "Acoustic redirected walking with auditory cues by means of wave field synthesis", "normalizedTitle": "Acoustic redirected walking with auditory cues by means of wave field synthesis", "abstract": "We present an experiment to identify detection thresholds for acoustic redirected walking by means of wave field synthesis. The most natural way to navigate an avatar through an immersive virtual environment (IVE) is by copying the tracked physical movements of a user. Redirected walking offers an approach to tackle the discrepancy between the potentially infinite IVE and the generally limited available physical space or tracking area, by applying manipulations, such as rotations or translations, to the IVE in form of gains to the users movements. 39 blindfolded test subjects performed a total of 2777 constant stimulus trials with various amounts of rotation and curvature gains. The test subjects were divided into four groups with different knowledge of the experiment, and one group performed two-alternative-forced-choice tasks, while the others could give feedback freely. The detection thresholds were greatly dependent on the groups i.e., the knowledge of the experiment. The 25% detection threshold was reached by the most relevant test group at gains that up-scaled rotations by 5%, down-scaled them by 37.5%, and bend a straight path into a circle with a radius of 5.71 meters. Almost no signs of simulator sickness could be observed.", "abstracts": [ { "abstractType": "Regular", "content": "We present an experiment to identify detection thresholds for acoustic redirected walking by means of wave field synthesis. 
The most natural way to navigate an avatar through an immersive virtual environment (IVE) is by copying the tracked physical movements of a user. Redirected walking offers an approach to tackle the discrepancy between the potentially infinite IVE and the generally limited available physical space or tracking area, by applying manipulations, such as rotations or translations, to the IVE in form of gains to the users movements. 39 blindfolded test subjects performed a total of 2777 constant stimulus trials with various amounts of rotation and curvature gains. The test subjects were divided into four groups with different knowledge of the experiment, and one group performed two-alternative-forced-choice tasks, while the others could give feedback freely. The detection thresholds were greatly dependent on the groups i.e., the knowledge of the experiment. The 25% detection threshold was reached by the most relevant test group at gains that up-scaled rotations by 5%, down-scaled them by 37.5%, and bend a straight path into a circle with a radius of 5.71 meters. Almost no signs of simulator sickness could be observed.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present an experiment to identify detection thresholds for acoustic redirected walking by means of wave field synthesis. The most natural way to navigate an avatar through an immersive virtual environment (IVE) is by copying the tracked physical movements of a user. Redirected walking offers an approach to tackle the discrepancy between the potentially infinite IVE and the generally limited available physical space or tracking area, by applying manipulations, such as rotations or translations, to the IVE in form of gains to the users movements. 39 blindfolded test subjects performed a total of 2777 constant stimulus trials with various amounts of rotation and curvature gains. 
The test subjects were divided into four groups with different knowledge of the experiment, and one group performed two-alternative-forced-choice tasks, while the others could give feedback freely. The detection thresholds were greatly dependent on the groups i.e., the knowledge of the experiment. The 25% detection threshold was reached by the most relevant test group at gains that up-scaled rotations by 5%, down-scaled them by 37.5%, and bend a straight path into a circle with a radius of 5.71 meters. Almost no signs of simulator sickness could be observed.", "fno": "07504745", "keywords": [ "Legged Locomotion", "Acoustics", "Tracking", "Virtual Environments", "Navigation", "Software", "H 5 1 Information And Interfaces And Presentation Multimedia Information Systems Artificial Augmented And Virtual Realities", "H 1 2 Models And Principles User Machine Systems Software Psychology Human Factors", "J 7 Computers In Other Systems Realtime" ], "authors": [ { "affiliation": "UAS Hamburg", "fullName": "Malte Nogalski", "givenName": "Malte", "surname": "Nogalski", "__typename": "ArticleAuthorType" }, { "affiliation": "UAS Hamburg", "fullName": "Wolfgang Fohl", "givenName": "Wolfgang", "surname": "Fohl", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2016-03-01T00:00:00", "pubType": "proceedings", "pages": "245-246", "year": "2016", "issn": "2375-5334", "isbn": "978-1-5090-0836-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07504744", "articleId": "12OmNyKJiB6", "__typename": "AdjacentArticleType" }, "next": { "fno": "07504746", "articleId": "12OmNBf94W7", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/3dui/2016/0842/0/07460032", "title": "Automated path prediction for redirected walking using navigation 
meshes", "doi": null, "abstractUrl": "/proceedings-article/3dui/2016/07460032/12OmNBKEymO", "parentPublication": { "id": "proceedings/3dui/2016/0842/0", "title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2014/2871/0/06802053", "title": "An enhanced steering algorithm for redirected walking in virtual environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2014/06802053/12OmNCbU2Wt", "parentPublication": { "id": "proceedings/vr/2014/2871/0", "title": "2014 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2013/4795/0/06549412", "title": "Estimation of detection thresholds for acoustic based redirected walking techniques", "doi": null, "abstractUrl": "/proceedings-article/vr/2013/06549412/12OmNz2C1yn", "parentPublication": { "id": "proceedings/vr/2013/4795/0", "title": "2013 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446479", "title": "Adopting the Roll Manipulation for Redirected Walking", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446479/13bd1eSlys4", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446225", "title": "Effect of Environment Size on Curvature Redirected Walking Thresholds", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446225/13bd1sx4Zt8", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/04/07036075", "title": "Cognitive 
Resource Demands of Redirected Walking", "doi": null, "abstractUrl": "/journal/tg/2015/04/07036075/13rRUxcKzVm", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798319", "title": "Simulation and Evaluation of Three-User Redirected Walking Algorithm in Shared Physical Spaces", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798319/1cJ1aPwr8l2", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798117", "title": "Estimation of Rotation Gain Thresholds for Redirected Walking Considering FOV and Gender", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798117/1cJ1fo5PwqY", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090595", "title": "Reactive Alignment of Virtual and Physical Environments Using Redirected Walking", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090595/1jIxm1j8B2w", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/11/09523832", "title": "Redirected Walking in Static and Dynamic Scenes Using Visibility Polygons", "doi": null, "abstractUrl": "/journal/tg/2021/11/09523832/1wpqjiNuSqY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxV4itF", "title": "2017 IEEE Virtual Reality (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNxy4N0w", "doi": "10.1109/VR.2017.7892241", "title": "Prism aftereffects for throwing with a self-avatar in an immersive virtual environment", "normalizedTitle": "Prism aftereffects for throwing with a self-avatar in an immersive virtual environment", "abstract": "The use of first-person self-avatars in immersive virtual environments (VEs) has grown over recent years. It is unknown, however, how visual feedback from a self-avatar influences a user's online actions and subsequent calibration of actions within an immersive VE. The current paper uses a prism throwing adaptation paradigm to test the role of a self-avatar arm or full body on action calibration in a VE. Participants' throwing accuracy to a target on the ground was measured first in a normal viewing environment, then with the visual field rotated clockwise about their vertical axis by 17° (prism simulation), and then again in the normal viewing environment with the prism distortion removed. Participants experienced either no-avatar, a first-person avatar arm and hand, or a first-person full body avatar during the entire experimental session, in a between-subjects manipulation. Results showed similar throwing error and adaptation during the prism exposure for all conditions, but a reduced aftereffect (displacement with respect to the target in the opposite direction of the prism-exposure) when the avatar arm or full body was present. The results are discussed in the context of how an avatar can provide a visual frame of reference to aid in action calibration.", "abstracts": [ { "abstractType": "Regular", "content": "The use of first-person self-avatars in immersive virtual environments (VEs) has grown over recent years. 
It is unknown, however, how visual feedback from a self-avatar influences a user's online actions and subsequent calibration of actions within an immersive VE. The current paper uses a prism throwing adaptation paradigm to test the role of a self-avatar arm or full body on action calibration in a VE. Participants' throwing accuracy to a target on the ground was measured first in a normal viewing environment, then with the visual field rotated clockwise about their vertical axis by 17° (prism simulation), and then again in the normal viewing environment with the prism distortion removed. Participants experienced either no-avatar, a first-person avatar arm and hand, or a first-person full body avatar during the entire experimental session, in a between-subjects manipulation. Results showed similar throwing error and adaptation during the prism exposure for all conditions, but a reduced aftereffect (displacement with respect to the target in the opposite direction of the prism-exposure) when the avatar arm or full body was present. The results are discussed in the context of how an avatar can provide a visual frame of reference to aid in action calibration.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The use of first-person self-avatars in immersive virtual environments (VEs) has grown over recent years. It is unknown, however, how visual feedback from a self-avatar influences a user's online actions and subsequent calibration of actions within an immersive VE. The current paper uses a prism throwing adaptation paradigm to test the role of a self-avatar arm or full body on action calibration in a VE. Participants' throwing accuracy to a target on the ground was measured first in a normal viewing environment, then with the visual field rotated clockwise about their vertical axis by 17° (prism simulation), and then again in the normal viewing environment with the prism distortion removed. 
Participants experienced either no-avatar, a first-person avatar arm and hand, or a first-person full body avatar during the entire experimental session, in a between-subjects manipulation. Results showed similar throwing error and adaptation during the prism exposure for all conditions, but a reduced aftereffect (displacement with respect to the target in the opposite direction of the prism-exposure) when the avatar arm or full body was present. The results are discussed in the context of how an avatar can provide a visual frame of reference to aid in action calibration.", "fno": "07892241", "keywords": [ "Avatars", "Visualization", "Calibration", "Legged Locomotion", "Tracking", "Virtual Environments", "Head", "Virtual Reality", "Prism Adaptation", "Self Avatar" ], "authors": [ { "affiliation": "Vanderbilt University, USA", "fullName": "Bobby Bodenheimer", "givenName": "Bobby", "surname": "Bodenheimer", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Utah, USA", "fullName": "Sarah Creem-Regehr", "givenName": "Sarah", "surname": "Creem-Regehr", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Utah, USA", "fullName": "Jeanine Stefanucci", "givenName": "Jeanine", "surname": "Stefanucci", "__typename": "ArticleAuthorType" }, { "affiliation": "Vanderbilt University, USA", "fullName": "Elena Shemetova", "givenName": "Elena", "surname": "Shemetova", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Utah, USA", "fullName": "William B. 
Thompson", "givenName": "William B.", "surname": "Thompson", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-01-01T00:00:00", "pubType": "proceedings", "pages": "141-147", "year": "2017", "issn": "2375-5334", "isbn": "978-1-5090-6647-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07892240", "articleId": "12OmNwGZNLp", "__typename": "AdjacentArticleType" }, "next": { "fno": "07892242", "articleId": "12OmNqIQSkJ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2015/1727/0/07223406", "title": "Self-characterstics and sound in immersive virtual reality — Estimating avatar weight from footstep sounds", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223406/12OmNAlvHUH", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446539", "title": "Investigating the Effects of Anthropomorphic Fidelity of Self-Avatars on Near Field Depth Perception in Immersive Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446539/13bd1h03qOe", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/04/08267487", "title": "Locomotive Recalibration and Prism Adaptation of Children and Teens in Immersive Virtual Environments", "doi": null, "abstractUrl": "/journal/tg/2018/04/08267487/13rRUxYrbMo", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/05/09714123", "title": "The Impact of Embodiment and Avatar Sizing on Personal Space in Immersive Virtual Environments", "doi": null, "abstractUrl": "/journal/tg/2022/05/09714123/1B0Y0yXxNbG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a792", "title": "Perception of Symmetry of Actual and Modulated Self-Avatar Gait Movements During Treadmill Walking", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a792/1CJe47o4BRm", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798263", "title": "EEG Can Be Used to Measure Embodiment When Controlling a Walking Self-Avatar", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798263/1cJ1gj5NtQc", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/05/08998305", "title": "Avatar and Sense of Embodiment: Studying the Relative Preference Between Appearance, Control and Point of View", "doi": null, "abstractUrl": "/journal/tg/2020/05/08998305/1hpPBuW1ahy", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090412", "title": "Modulating The Gait Of A Real-Time Self-Avatar To Induce Changes In Stride Length During Treadmill Walking", "doi": null, 
"abstractUrl": "/proceedings-article/vrw/2020/09090412/1jIxkYhRpKg", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090634", "title": "Rhythmic proprioceptive stimulation improves embodiment in a walking avatar when added to visual stimulation", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090634/1jIxkrgIlEY", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090453", "title": "Perception of Walking Self-body Avatar Enhances Virtual-walking Sensation", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090453/1jIxoojmMy4", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "13bd1eJgoia", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "13bd1fdV4lC", "doi": "10.1109/VR.2018.8446189", "title": "Towards Revisiting Passability Judgments in Real and Immersive Virtual Environments", "normalizedTitle": "Towards Revisiting Passability Judgments in Real and Immersive Virtual Environments", "abstract": "Every task we perform in our day-to-day lives requires us to make judgements about size, distance, depth, etc. The same is true for tasks in an immersive virtual environments (IVE). Increasingly, Virtual Reality (VR) applications are being developed for training and entertainment, many of which require the user to determining whether s/he can pass through an opening. Typically, people determine their ability to pass through an aperture by comparing the width of their shoulders to the width of the opening. Thus, judgments of size and distance in an IVE are necessary for accurate judgments of passability. In this experiment, we empirically evaluate how passability judgments in an IVE, viewed through a Head-Mounted Display (HMD), compare to judgments made in the real world. An exact to scale virtual replica of the room and apparatus was used for the VR condition. Results indicate that the accuracy of passability judgments seem to be comparable to the real world.", "abstracts": [ { "abstractType": "Regular", "content": "Every task we perform in our day-to-day lives requires us to make judgements about size, distance, depth, etc. The same is true for tasks in an immersive virtual environments (IVE). Increasingly, Virtual Reality (VR) applications are being developed for training and entertainment, many of which require the user to determining whether s/he can pass through an opening. 
Typically, people determine their ability to pass through an aperture by comparing the width of their shoulders to the width of the opening. Thus, judgments of size and distance in an IVE are necessary for accurate judgments of passability. In this experiment, we empirically evaluate how passability judgments in an IVE, viewed through a Head-Mounted Display (HMD), compare to judgments made in the real world. An exact to scale virtual replica of the room and apparatus was used for the VR condition. Results indicate that the accuracy of passability judgments seem to be comparable to the real world.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Every task we perform in our day-to-day lives requires us to make judgements about size, distance, depth, etc. The same is true for tasks in an immersive virtual environments (IVE). Increasingly, Virtual Reality (VR) applications are being developed for training and entertainment, many of which require the user to determining whether s/he can pass through an opening. Typically, people determine their ability to pass through an aperture by comparing the width of their shoulders to the width of the opening. Thus, judgments of size and distance in an IVE are necessary for accurate judgments of passability. In this experiment, we empirically evaluate how passability judgments in an IVE, viewed through a Head-Mounted Display (HMD), compare to judgments made in the real world. An exact to scale virtual replica of the room and apparatus was used for the VR condition. 
Results indicate that the accuracy of passability judgments seem to be comparable to the real world.", "fno": "08446189", "keywords": [ "Helmet Mounted Displays", "Virtual Reality", "Virtual Reality Applications", "IVE", "HMD", "Head Mounted Display", "VR Condition", "Day To Day Lives", "Immersive Virtual Environments", "Virtual Replica", "Accurate Judgments", "Entertainment", "Apertures", "Virtual Environments", "Estimation", "Psychology", "Task Analysis", "Resists", "Human Centered Computing HCI Design And Evaluation Methods", "Human Centered Computing Empirical Studies In HCI" ], "authors": [ { "affiliation": "School of Computing", "fullName": "Ayush Bhargava", "givenName": "Ayush", "surname": "Bhargava", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Psychology", "fullName": "Kathryn M. Lucaites", "givenName": "Kathryn M.", "surname": "Lucaites", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Psychology", "fullName": "Leah S. Hartman", "givenName": "Leah S.", "surname": "Hartman", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Psychology", "fullName": "Hannah Solini", "givenName": "Hannah", "surname": "Solini", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Computing", "fullName": "Jeffrey W. Bertrand", "givenName": "Jeffrey W.", "surname": "Bertrand", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Computing", "fullName": "Andrew C. Robb", "givenName": "Andrew C.", "surname": "Robb", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Psychology", "fullName": "Christopher C. Pagano", "givenName": "Christopher C.", "surname": "Pagano", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Computing", "fullName": "Sabarish V. 
Babu", "givenName": "Sabarish V.", "surname": "Babu", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-03-01T00:00:00", "pubType": "proceedings", "pages": "513-514", "year": "2018", "issn": null, "isbn": "978-1-5386-3365-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08446583", "articleId": "13bd1ftOBDp", "__typename": "AdjacentArticleType" }, "next": { "fno": "08446227", "articleId": "13bd1eTtWZ2", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2016/0836/0/07504729", "title": "Influence by others' opinions: Social pressure from agents in immersive virtual environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504729/12OmNs5rkT8", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08643340", "title": "Interpersonal Affordances and Social Dynamics in Collaborative Immersive Virtual Environments: Passing Together Through Apertures", "doi": null, "abstractUrl": "/journal/tg/2019/05/08643340/18K0nmkwheo", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10049626", "title": "Can I Squeeze Through? 
Effects of Self-Avatars and Calibration in a Person-Plus-Virtual-Object System on Perceived Lateral Passability in VR", "doi": null, "abstractUrl": "/journal/tg/2023/05/10049626/1KYoySw7RM4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a308", "title": "Empirically Evaluating the Effects of Eye Height and Self-Avatars on Dynamic Passability Affordances in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a308/1MNgWLowz1m", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798036", "title": "Vibro-tactile Feedback for Real-world Awareness in Immersive Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798036/1cJ15HGOeqc", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089552", "title": "The Role of Viewing Distance and Feedback on Affordance Judgments in Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089552/1jIx8sfGbSw", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089645", "title": "Comparative Evaluation of Viewing and Self-Representation on Passability Affordances to a Realistic Sliding Doorway in Real and Immersive Virtual Environments", "doi": null, "abstractUrl": 
"/proceedings-article/vr/2020/09089645/1jIx9zwn7SE", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090660", "title": "Relative Room Size Judgments in Impossible Spaces", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090660/1jIxqsUN6ik", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/12/09440766", "title": "Did I Hit the Door? Effects of Self-Avatars and Calibration in a Person-Plus-Virtual-Object System on Perceived Frontal Passability in VR", "doi": null, "abstractUrl": "/journal/tg/2022/12/09440766/1tTpcuKN5jW", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a719", "title": "Attitude Change in Immersive Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a719/1tnXhqirhcc", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1jIxhEnA8IE", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "acronym": "vrw", "groupId": "1836626", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1jIxwwL7jmE", "doi": "10.1109/VRW50115.2020.00036", "title": "Walk this way: Evaluating the effect of perceived gender and attractiveness of motion on proximity in virtual reality", "normalizedTitle": "Walk this way: Evaluating the effect of perceived gender and attractiveness of motion on proximity in virtual reality", "abstract": "In human interaction, people will keep different distances from each other depending on their gender: males will stand further away from males and closer to females. However, many other variables influence proximity, such as appearance characteristics of the virtual character (e.g., attractiveness, etc.). Our study focuses on proximity to virtual walkers in virtual reality (VR), where gender could be inferred from motion only. We applied a set of male and female walking motions (motion capture) to a wooden mannequin, and displayed them to the participant embodied in a virtual avatar in VR. Participants used the controller to stop the approaching mannequin when they felt it was uncomfortably close to them. We hypothesized that proximity will be affected by the gender of the character, but also the gender of the participant. We additionally expected some motions to be rated more attractive than others and that attractive motions would reduce the proximity measure. Our results show support for the last two assumptions, but no difference in proximity was found according to the gender of the character&#x2019;s motion. 
Our findings have implications for the design of virtual characters in interactive virtual environments.", "abstracts": [ { "abstractType": "Regular", "content": "In human interaction, people will keep different distances from each other depending on their gender: males will stand further away from males and closer to females. However, many other variables influence proximity, such as appearance characteristics of the virtual character (e.g., attractiveness, etc.). Our study focuses on proximity to virtual walkers in virtual reality (VR), where gender could be inferred from motion only. We applied a set of male and female walking motions (motion capture) to a wooden mannequin, and displayed them to the participant embodied in a virtual avatar in VR. Participants used the controller to stop the approaching mannequin when they felt it was uncomfortably close to them. We hypothesized that proximity will be affected by the gender of the character, but also the gender of the participant. We additionally expected some motions to be rated more attractive than others and that attractive motions would reduce the proximity measure. Our results show support for the last two assumptions, but no difference in proximity was found according to the gender of the character&#x2019;s motion. Our findings have implications for the design of virtual characters in interactive virtual environments.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In human interaction, people will keep different distances from each other depending on their gender: males will stand further away from males and closer to females. However, many other variables influence proximity, such as appearance characteristics of the virtual character (e.g., attractiveness, etc.). Our study focuses on proximity to virtual walkers in virtual reality (VR), where gender could be inferred from motion only. 
We applied a set of male and female walking motions (motion capture) to a wooden mannequin, and displayed them to the participant embodied in a virtual avatar in VR. Participants used the controller to stop the approaching mannequin when they felt it was uncomfortably close to them. We hypothesized that proximity will be affected by the gender of the character, but also the gender of the participant. We additionally expected some motions to be rated more attractive than others and that attractive motions would reduce the proximity measure. Our results show support for the last two assumptions, but no difference in proximity was found according to the gender of the character’s motion. Our findings have implications for the design of virtual characters in interactive virtual environments.", "fno": "09090486", "keywords": [ "Virtual Reality", "Legged Locomotion", "Psychology", "Animation", "Conferences", "Task Analysis" ], "authors": [ { "affiliation": "Inria,Rennes", "fullName": "Katja Zibrek", "givenName": "Katja", "surname": "Zibrek", "__typename": "ArticleAuthorType" }, { "affiliation": "Inria,Rennes", "fullName": "Benjamin Niay", "givenName": "Benjamin", "surname": "Niay", "__typename": "ArticleAuthorType" }, { "affiliation": "Univ Rennes,Inria, M2S,Rennes", "fullName": "Anne-Hélène Olivier", "givenName": "Anne-Hélène", "surname": "Olivier", "__typename": "ArticleAuthorType" }, { "affiliation": "Inria,Rennes", "fullName": "Ludovic Hoyet", "givenName": "Ludovic", "surname": "Hoyet", "__typename": "ArticleAuthorType" }, { "affiliation": "Inria,Rennes", "fullName": "Julien Pettre", "givenName": "Julien", "surname": "Pettre", "__typename": "ArticleAuthorType" }, { "affiliation": "Trinity College Dublin", "fullName": "Rachel McDonnell", "givenName": "Rachel", "surname": "McDonnell", "__typename": "ArticleAuthorType" } ], "idPrefix": "vrw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-03-01T00:00:00", 
"pubType": "proceedings", "pages": "169-170", "year": "2020", "issn": null, "isbn": "978-1-7281-6532-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09090666", "articleId": "1jIxosBkVqw", "__typename": "AdjacentArticleType" }, "next": { "fno": "09090656", "articleId": "1jIxzMPf8g8", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2014/2871/0/06802054", "title": "Time perception during walking in virtual environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2014/06802054/12OmNBpmDG4", "parentPublication": { "id": "proceedings/vr/2014/2871/0", "title": "2014 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/percomw/2013/5075/0/06529474", "title": "Energy efficient proximity alert on Android", "doi": null, "abstractUrl": "/proceedings-article/percomw/2013/06529474/12OmNxWLTtk", "parentPublication": { "id": "proceedings/percomw/2013/5075/0", "title": "2013 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops 2013)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aqtr/2018/2205/0/08402731", "title": "Motion assistance with an exoskeleton for stair climb", "doi": null, "abstractUrl": "/proceedings-article/aqtr/2018/08402731/12OmNy7Qflu", "parentPublication": { "id": "proceedings/aqtr/2018/2205/0", "title": "2018 IEEE International Conference on Automation, Quality and Testing, Robotics (AQTR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892337", "title": "Virginia tech's study hall: A virtual method of loci mnemotechnic study using a neurologically-based, mechanism-driven, approach to immersive learning research", "doi": null, "abstractUrl": 
"/proceedings-article/vr/2017/07892337/12OmNzXnNz7", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446426", "title": "Walk-Centric User Interfaces", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446426/13bd1fHrlRZ", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/1996/02/mcg1996020040", "title": "Parameterized Gait Synthesis", "doi": null, "abstractUrl": "/magazine/cg/1996/02/mcg1996020040/13rRUyuvRra", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a672", "title": "Proximity in VR: The Importance of Character Attractiveness and Participant Gender", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a672/1CJdlUeTTlC", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2022/6814/0/681400a118", "title": "Crowd Simulation with Feedback Based on Locomotion State", "doi": null, "abstractUrl": "/proceedings-article/cw/2022/681400a118/1I6RQ8VlGNi", "parentPublication": { "id": "proceedings/cw/2022/6814/0", "title": "2022 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2018/6861/0/08802449", "title": "The Effect of Proximity in Social Data Charts on Perceived Unity", "doi": null, "abstractUrl": 
"/proceedings-article/vast/2018/08802449/1cJ6X5kCs5G", "parentPublication": { "id": "proceedings/vast/2018/6861/0", "title": "2018 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089573", "title": "Effects of Interacting with a Crowd of Emotional Virtual Humans on Users&#x2019; Affective and Non-Verbal Behaviors", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089573/1jIxfPwklig", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1yfxDjRGMmc", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "1810084", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1yfxKkf4zpm", "doi": "10.1109/ISMAR-Adjunct54149.2021.00061", "title": "Walking Through Walls: The Effect of Collision-Based Feedback on Affordance Judgments in Augmented Reality", "normalizedTitle": "Walking Through Walls: The Effect of Collision-Based Feedback on Affordance Judgments in Augmented Reality", "abstract": "Feedback about actions in augmented reality (AR) is limited and can be ambi due to the nature of interacting with virtual objects. AR devices also have a restricted field of view (FOV), limiting the amount of available visual information that can be used to perform an action or provide feedback during or after an action. We used the Microsoft HoloLens 1 to investigate whether perceptual-motor, collision-based outcome feedback calibrates judgments of whether one can pass through an aperture in AR. Additionally, we manipulated the amount of information available within the FOV by having participants view the aperture at two different distances. Feedback calibrated passing-through judgments at both distances but resulted in an overestimation of the just-passable aperture width. Moreover, the far viewing condition had more overestimation of just-passable aperture width than the near viewing condition.", "abstracts": [ { "abstractType": "Regular", "content": "Feedback about actions in augmented reality (AR) is limited and can be ambi due to the nature of interacting with virtual objects. AR devices also have a restricted field of view (FOV), limiting the amount of available visual information that can be used to perform an action or provide feedback during or after an action. 
We used the Microsoft HoloLens 1 to investigate whether perceptual-motor, collision-based outcome feedback calibrates judgments of whether one can pass through an aperture in AR. Additionally, we manipulated the amount of information available within the FOV by having participants view the aperture at two different distances. Feedback calibrated passing-through judgments at both distances but resulted in an overestimation of the just-passable aperture width. Moreover, the far viewing condition had more overestimation of just-passable aperture width than the near viewing condition.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Feedback about actions in augmented reality (AR) is limited and can be ambi due to the nature of interacting with virtual objects. AR devices also have a restricted field of view (FOV), limiting the amount of available visual information that can be used to perform an action or provide feedback during or after an action. We used the Microsoft HoloLens 1 to investigate whether perceptual-motor, collision-based outcome feedback calibrates judgments of whether one can pass through an aperture in AR. Additionally, we manipulated the amount of information available within the FOV by having participants view the aperture at two different distances. Feedback calibrated passing-through judgments at both distances but resulted in an overestimation of the just-passable aperture width. 
Moreover, the far viewing condition had more overestimation of just-passable aperture width than the near viewing condition.", "fno": "129800a266", "keywords": [ "Augmented Reality", "Calibration", "Feedback", "Human Factors", "Microsoft Holo Lens 1", "Perceptual Motor", "FOV", "Just Passable Aperture Width", "Viewing Condition", "Walls", "Collision Based Feedback", "Affordance Judgments", "Augmented Reality", "Virtual Objects", "Visual Information", "Collision Based Outcome Feedback", "Performance Evaluation", "Legged Locomotion", "Visualization", "Limiting", "Multimedia Systems", "Affordances", "Apertures", "Augmented Reality", "Affordances", "Feedback", "Perception" ], "authors": [ { "affiliation": "University of Utah,USA", "fullName": "Holly C. Gagnon", "givenName": "Holly C.", "surname": "Gagnon", "__typename": "ArticleAuthorType" }, { "affiliation": "Vanderbilt University,USA", "fullName": "Dun Na", "givenName": "Dun", "surname": "Na", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Utah,USA", "fullName": "Keith Heiner", "givenName": "Keith", "surname": "Heiner", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Utah,USA", "fullName": "Jeanine Stefanucci", "givenName": "Jeanine", "surname": "Stefanucci", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Utah,USA", "fullName": "Sarah Creem-Regehr", "givenName": "Sarah", "surname": "Creem-Regehr", "__typename": "ArticleAuthorType" }, { "affiliation": "Vanderbilt University,USA", "fullName": "Bobby Bodenheimer", "givenName": "Bobby", "surname": "Bodenheimer", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-10-01T00:00:00", "pubType": "proceedings", "pages": "266-267", "year": "2021", "issn": null, "isbn": "978-1-6654-1298-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { 
"previous": { "fno": "129800a260", "articleId": "1yeQKPuFLTW", "__typename": "AdjacentArticleType" }, "next": { "fno": "129800a268", "articleId": "1yeQL1J7Xsk", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar-adjunct/2017/6327/0/6327a143", "title": "[POSTER] Walking in Augmented Reality: An Experimental Evaluation by Playing with a Virtual Hopscotch", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a143/12OmNA14A7W", "parentPublication": { "id": "proceedings/ismar-adjunct/2017/6327/0", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446177", "title": "You Shall Not Pass: Non-Intrusive Feedback for Virtual Walls in VR Environments with Room-Scale Mapping", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446177/13bd1eSlyu1", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/07/07955099", "title": "Collision Avoidance Behavior between Walkers: Global and Local Motion Cues", "doi": null, "abstractUrl": "/journal/tg/2018/07/07955099/13rRUxcbnHk", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2013/01/tth2013010035", "title": "Haptic Feedback for Enhancing Realism of Walking Simulations", "doi": null, "abstractUrl": "/journal/th/2013/01/tth2013010035/13rRUy0HYRD", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tg/2019/05/08643340", "title": "Interpersonal Affordances and Social Dynamics in Collaborative Immersive Virtual Environments: Passing Together Through Apertures", "doi": null, "abstractUrl": "/journal/tg/2019/05/08643340/18K0nmkwheo", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a786", "title": "An Examination on Reduction of Displayed Character Shake while Walking in Place with AR Glasses", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a786/1CJf8OTaee4", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798095", "title": "Distance Judgments to On- and Off-Ground Objects in Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798095/1cJ0Yxz6rrG", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089552", "title": "The Role of Viewing Distance and Feedback on Affordance Judgments in Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089552/1jIx8sfGbSw", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2020/7675/0/767500a264", "title": "Towards an AR game for walking rehabilitation: Preliminary study of the impact of augmented feedback modalities on walking speed", "doi": 
null, "abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a264/1pBMjJvQae4", "parentPublication": { "id": "proceedings/ismar-adjunct/2020/7675/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a559", "title": "Affordance Judgments in Mobile Augmented Reality with Cues", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a559/1tnXRvkRMw8", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAIMObM", "title": "2015 3rd IEEE VR International Workshop on Virtual and Augmented Assistive Technology (VAAT)", "acronym": "vaat", "groupId": "1803604", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNAmE5Y8", "doi": "10.1109/VAAT.2015.7155406", "title": "Towards attention monitoring of older adults with cognitive impairment during interaction with an embodied conversational agent", "normalizedTitle": "Towards attention monitoring of older adults with cognitive impairment during interaction with an embodied conversational agent", "abstract": "Embodied conversational agents (ECAs) are virtual characters using verbal and non-verbal communication for Human-machine interaction. The aim of our research is to create an ECA-based user interface for assistive technologies targeting older adults with cognitive impairment. Our design methodology is a co-design living lab approach, collecting design guidelines through questionnaires, focus groups and user trials. In this paper, we report on the results of the first phase of this iterative design process. We developed Louise, a semi-automatic ECA prototype that aims to compensate, through attention monitoring, for a user's attentional disorders by performing autonomous prompting, i.e., calling the user to regain his or her attention in case he or she got distracted. We evaluated the performance of Louise with a group of experts in assistive technologies and collected their feedback. Louise's simple attention estimator is more than 80% accurate. The system got quite positive reviews from users.", "abstracts": [ { "abstractType": "Regular", "content": "Embodied conversational agents (ECAs) are virtual characters using verbal and non-verbal communication for Human-machine interaction. The aim of our research is to create an ECA-based user interface for assistive technologies targeting older adults with cognitive impairment. 
Our design methodology is a co-design living lab approach, collecting design guidelines through questionnaires, focus groups and user trials. In this paper, we report on the results of the first phase of this iterative design process. We developed Louise, a semi-automatic ECA prototype that aims to compensate, through attention monitoring, for a user's attentional disorders by performing autonomous prompting, i.e., calling the user to regain his or her attention in case he or she got distracted. We evaluated the performance of Louise with a group of experts in assistive technologies and collected their feedback. Louise's simple attention estimator is more than 80% accurate. The system got quite positive reviews from users.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Embodied conversational agents (ECAs) are virtual characters using verbal and non-verbal communication for Human-machine interaction. The aim of our research is to create an ECA-based user interface for assistive technologies targeting older adults with cognitive impairment. Our design methodology is a co-design living lab approach, collecting design guidelines through questionnaires, focus groups and user trials. In this paper, we report on the results of the first phase of this iterative design process. We developed Louise, a semi-automatic ECA prototype that aims to compensate, through attention monitoring, for a user's attentional disorders by performing autonomous prompting, i.e., calling the user to regain his or her attention in case he or she got distracted. We evaluated the performance of Louise with a group of experts in assistive technologies and collected their feedback. Louise's simple attention estimator is more than 80% accurate. 
The system got quite positive reviews from users.", "fno": "07155406", "keywords": [ "Face", "Monitoring", "Dementia", "Assistive Technology", "Estimation", "Prototypes", "Animation", "Dementia", "Embodied Conversational Agents", "Assistive Technologies", "Attention Estimation" ], "authors": [ { "affiliation": "MINES ParisTech, PSL Research University, France", "fullName": "Pierre Wargnier", "givenName": "Pierre", "surname": "Wargnier", "__typename": "ArticleAuthorType" }, { "affiliation": "CEN Stimco, France", "fullName": "Adrien Malaise", "givenName": "Adrien", "surname": "Malaise", "__typename": "ArticleAuthorType" }, { "affiliation": "Broca hospital, Université Paris Descartes, France", "fullName": "Julien Jacquemot", "givenName": "Julien", "surname": "Jacquemot", "__typename": "ArticleAuthorType" }, { "affiliation": "MINES ParisTech, PSL Research University, CEN Stimco, France", "fullName": "Samuel Benveniste", "givenName": "Samuel", "surname": "Benveniste", "__typename": "ArticleAuthorType" }, { "affiliation": "MINES ParisTech, PSL Research University, France", "fullName": "Pierre Jouvelot", "givenName": "Pierre", "surname": "Jouvelot", "__typename": "ArticleAuthorType" }, { "affiliation": "Broca hospital, Université Paris Descartes, France", "fullName": "Maribel Pino", "givenName": "Maribel", "surname": "Pino", "__typename": "ArticleAuthorType" }, { "affiliation": "Broca hospital, Université Paris Descartes, France", "fullName": "Anne-Sophie Rigaud", "givenName": "Anne-Sophie", "surname": "Rigaud", "__typename": "ArticleAuthorType" } ], "idPrefix": "vaat", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-03-01T00:00:00", "pubType": "proceedings", "pages": "23-28", "year": "2015", "issn": null, "isbn": "978-1-4673-6518-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07155405", "articleId": "12OmNASILJm", "__typename": 
"AdjacentArticleType" }, "next": { "fno": "07155407", "articleId": "12OmNwGqBpc", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/segah/2016/2210/0/07586282", "title": "Field evaluation with cognitively-impaired older adults of attention management in the Embodied Conversational Agent Louise", "doi": null, "abstractUrl": "/proceedings-article/segah/2016/07586282/12OmNCfjeAt", "parentPublication": { "id": "proceedings/segah/2016/2210/0", "title": "2016 IEEE International Conference on Serious Games and Applications for Health (SeGAH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2015/7367/0/7367a491", "title": "Embodied Conversational Agents: Social or Nonsocial?", "doi": null, "abstractUrl": "/proceedings-article/hicss/2015/7367a491/12OmNwcCIKP", "parentPublication": { "id": "proceedings/hicss/2015/7367/0", "title": "2015 48th Hawaii International Conference on System Sciences (HICSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icci-cc/2012/2795/0/06311164", "title": "Toward a memory assistant companion for the individuals with mild memory impairment", "doi": null, "abstractUrl": "/proceedings-article/icci-cc/2012/06311164/12OmNzd7bZX", "parentPublication": { "id": "proceedings/icci-cc/2012/2795/0", "title": "2012 11th IEEE International Conference on Cognitive Informatics & Cognitive Computing (ICCI*CC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icedeg/2018/2521/0/08372345", "title": "Location System and Monitoring of Vital Signs in Older Adults and People with Alzheimer", "doi": null, "abstractUrl": "/proceedings-article/icedeg/2018/08372345/12OmNzmLxQE", "parentPublication": { "id": "proceedings/icedeg/2018/2521/0", "title": "2018 Fifth International Conference on eDemocracy & eGovernment (ICEDEG)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/snpd/2021/0403/0/09704971", "title": "Proposal for a Personalized Adaptive Speaker Service to Support the Elderly at Home", "doi": null, "abstractUrl": "/proceedings-article/snpd/2021/09704971/1AUphEOwcc8", "parentPublication": { "id": "proceedings/snpd/2021/0403/0", "title": "2021 IEEE/ACIS 22nd International Conference on Software Engineering, Artificial Intelligence, Networking and Parallel/Distributed Computing (SNPD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/5555/01/09684685", "title": "Artificial Emotional Intelligence in Socially Assistive Robots for Older Adults: A Pilot Study", "doi": null, "abstractUrl": "/journal/ta/5555/01/09684685/1AgmkaKnJyE", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/10044277", "title": "Real-time Multi-map Saliency-driven Gaze Behavior for Non-conversational Characters", "doi": null, "abstractUrl": "/journal/tg/5555/01/10044277/1KL728MHdtu", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2022/5725/0/572500a036", "title": "Multimodal Embodied Conversational Agents: A discussion of architectures, frameworks and modules for commercial applications", "doi": null, "abstractUrl": "/proceedings-article/aivr/2022/572500a036/1KmFgxp1TOw", "parentPublication": { "id": "proceedings/aivr/2022/5725/0", "title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2019/3888/0/08925495", "title": "A Computational Model for Managing 
Impressions of an Embodied Conversational Agent in Real-Time", "doi": null, "abstractUrl": "/proceedings-article/acii/2019/08925495/1fHGH3UBS5W", "parentPublication": { "id": "proceedings/acii/2019/3888/0", "title": "2019 8th International Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wiiatw/2006/2749/0/04053278", "title": "An Embodied Conversational Agent for Intelligent Web Interaction on Pandemic Crisis Communication", "doi": null, "abstractUrl": "/proceedings-article/wiiatw/2006/04053278/1iCAnGKbYJy", "parentPublication": { "id": "proceedings/wiiatw/2006/2749/0", "title": "2006 IEEE/WIC/ACM International Conference on Web Intelligence International Intelligence Agent Technology Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwpGgL6", "title": "Proceedings Fourth IEEE International Conference on Multimodal Interfaces", "acronym": "icmi", "groupId": "1002175", "volume": "0", "displayVolume": "0", "year": "2002", "__typename": "ProceedingType" }, "article": { "id": "12OmNxG1yL1", "doi": "10.1109/ICMI.2002.1167038", "title": "Multi-Modal Embodied Agents Scripting", "normalizedTitle": "Multi-Modal Embodied Agents Scripting", "abstract": "Embodied agents present ongoing challenging agenda for research in multi-modal user interfaces and human-computer-interaction. Such agent metaphors will only be widely applicable to online applications when there is a standardised way to map underlying engines with the visual presentation of the agents. This paper delineates the functions and specifications of a mark-up language for scripting the animation of virtual characters. The language is called: Character Mark-up Language (CML) and is an XML-based character attribute definition and animation scripting language designed to aid in the rapid incorporation of lifelike characters/agents into online applications or virtual reality worlds. This multi-modal scripting language is designed to be easily understandable by human animators and easily generated by a software process such as software agents. CML is constructed based jointly on motion and multi-modal capabilities of virtual life-like figures. The paper further illustrates the constructs of the language and describes a real-time execution architecture that demonstrates the use of such a language as a 4G language to easily utilise and integrate MPEG-4 media objects in online interfaces and virtual environments.", "abstracts": [ { "abstractType": "Regular", "content": "Embodied agents present ongoing challenging agenda for research in multi-modal user interfaces and human-computer-interaction. 
Such agent metaphors will only be widely applicable to online applications when there is a standardised way to map underlying engines with the visual presentation of the agents. This paper delineates the functions and specifications of a mark-up language for scripting the animation of virtual characters. The language is called: Character Mark-up Language (CML) and is an XML-based character attribute definition and animation scripting language designed to aid in the rapid incorporation of lifelike characters/agents into online applications or virtual reality worlds. This multi-modal scripting language is designed to be easily understandable by human animators and easily generated by a software process such as software agents. CML is constructed based jointly on motion and multi-modal capabilities of virtual life-like figures. The paper further illustrates the constructs of the language and describes a real-time execution architecture that demonstrates the use of such a language as a 4G language to easily utilise and integrate MPEG-4 media objects in online interfaces and virtual environments.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Embodied agents present ongoing challenging agenda for research in multi-modal user interfaces and human-computer-interaction. Such agent metaphors will only be widely applicable to online applications when there is a standardised way to map underlying engines with the visual presentation of the agents. This paper delineates the functions and specifications of a mark-up language for scripting the animation of virtual characters. The language is called: Character Mark-up Language (CML) and is an XML-based character attribute definition and animation scripting language designed to aid in the rapid incorporation of lifelike characters/agents into online applications or virtual reality worlds. 
This multi-modal scripting language is designed to be easily understandable by human animators and easily generated by a software process such as software agents. CML is constructed based jointly on motion and multi-modal capabilities of virtual life-like figures. The paper further illustrates the constructs of the language and describes a real-time execution architecture that demonstrates the use of such a language as a 4G language to easily utilise and integrate MPEG-4 media objects in online interfaces and virtual environments.", "fno": "18340454", "keywords": [ "Embodied Agent", "Lifelike Characters", "MPEG 4", "Mark Up Languages", "Automated Animation Scripting", "CML", "Animated Expression" ], "authors": [ { "affiliation": "Imperial College London", "fullName": "Yasmine Arafa", "givenName": "Yasmine", "surname": "Arafa", "__typename": "ArticleAuthorType" }, { "affiliation": "Imperial College London", "fullName": "Abe Mamdani", "givenName": "Abe", "surname": "Mamdani", "__typename": "ArticleAuthorType" } ], "idPrefix": "icmi", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2002-10-01T00:00:00", "pubType": "proceedings", "pages": "454", "year": "2002", "issn": null, "isbn": "0-7695-1834-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "18340448", "articleId": "12OmNsd6vi1", "__typename": "AdjacentArticleType" }, "next": { "fno": "18340460", "articleId": "12OmNvlPkvW", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/compsac/2002/1727/0/17270581", "title": "MADSS: A Multi-Agent Based Distributed Scripting System", "doi": null, "abstractUrl": "/proceedings-article/compsac/2002/17270581/12OmNAoUTqq", "parentPublication": { "id": "proceedings/compsac/2002/1727/0", "title": "Proceedings 26th Annual International Computer Software and Applications", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vl/1998/8712/0/87120280", "title": "Multimedia Workshop: Exploring the Benefits of a Visual Scripting Language", "doi": null, "abstractUrl": "/proceedings-article/vl/1998/87120280/12OmNBl6EH3", "parentPublication": { "id": "proceedings/vl/1998/8712/0", "title": "Visual Languages, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/tools/1999/0278/0/02780558", "title": "Agents and Workflow -- An Intimate Connection, or Just Friends?", "doi": null, "abstractUrl": "/proceedings-article/tools/1999/02780558/12OmNCcbEkJ", "parentPublication": { "id": "proceedings/tools/1999/0278/0", "title": "Proceedings of Technology of Object-Oriented Languages and Systems - TOOLS 30 (Cat. No.PR00278)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aina/2008/3095/0/3095b049", "title": "A Scripting Approach for Workflow of Agents", "doi": null, "abstractUrl": "/proceedings-article/aina/2008/3095b049/12OmNqOwQBJ", "parentPublication": { "id": "proceedings/aina/2008/3095/0", "title": "22nd International Conference on Advanced Information Networking and Applications (aina 2008)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/casa/2003/1934/0/19340105", "title": "XSTEP: A Markup Language for Embodied Agents", "doi": null, "abstractUrl": "/proceedings-article/casa/2003/19340105/12OmNyr8YcJ", "parentPublication": { "id": "proceedings/casa/2003/1934/0", "title": "Computer Animation and Social Agents, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wi-iat/2011/4513/2/4513b046", "title": "Scalable Perception for BDI-Agents Embodied in Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/wi-iat/2011/4513b046/12OmNz5JBQk", 
"parentPublication": { "id": "proceedings/wi-iat/2011/4513/2", "title": "Web Intelligence and Intelligent Agent Technology, IEEE/WIC/ACM International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ias/2008/3324/0/3324a227", "title": "Challenges for Security Typed Web Scripting Languages Design", "doi": null, "abstractUrl": "/proceedings-article/ias/2008/3324a227/12OmNzC5Tcl", "parentPublication": { "id": "proceedings/ias/2008/3324/0", "title": "Information Assurance and Security, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aciids/2009/3580/0/3580a183", "title": "Representing the Meaning of Symbols in Autonomous Agents", "doi": null, "abstractUrl": "/proceedings-article/aciids/2009/3580a183/12OmNzxyiN6", "parentPublication": { "id": "proceedings/aciids/2009/3580/0", "title": "Intelligent Information and Database Systems, Asian Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/mu/1995/02/u2050", "title": "Quality Assurance in Scripting", "doi": null, "abstractUrl": "/magazine/mu/1995/02/u2050/13rRUwI5UcQ", "parentPublication": { "id": "mags/mu", "title": "IEEE MultiMedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/05/ttg2011050655", "title": "MPML3D: Scripting Agents for the 3D Internet", "doi": null, "abstractUrl": "/journal/tg/2011/05/ttg2011050655/13rRUwdrdSx", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKirK", "title": "2018 IEEE 1st Workshop on Animation in Virtual and Augmented Environments (ANIVAE)", "acronym": "anivae", "groupId": "1828984", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "17D45Wuc3a1", "doi": "10.1109/ANIVAE.2018.8587269", "title": "Animating Participants in Co-located Playful Mixed-Reality Installations", "normalizedTitle": "Animating Participants in Co-located Playful Mixed-Reality Installations", "abstract": "As Virtual and Augmented Reality technologies become more prevalent, they are being increasingly utilized in artistic installations in order to offer participants a more immersive spatial experience. Such installations typically focus on the experience of individual users. Nevertheless, shared co-located experiences with multiple users are becoming more common. Making participants aware of each other and facilitating interaction between them in such virtual environments can be quite challenging, particularly considering the technical limitations that still exist despite the advent of new technologies. This paper presents a few different animation strategies based on participants' physical movements in three mixed-reality installations.", "abstracts": [ { "abstractType": "Regular", "content": "As Virtual and Augmented Reality technologies become more prevalent, they are being increasingly utilized in artistic installations in order to offer participants a more immersive spatial experience. Such installations typically focus on the experience of individual users. Nevertheless, shared co-located experiences with multiple users are becoming more common. Making participants aware of each other and facilitating interaction between them in such virtual environments can be quite challenging, particularly considering the technical limitations that still exist despite the advent of new technologies. 
This paper presents a few different animation strategies based on participants' physical movements in three mixed-reality installations.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "As Virtual and Augmented Reality technologies become more prevalent, they are being increasingly utilized in artistic installations in order to offer participants a more immersive spatial experience. Such installations typically focus on the experience of individual users. Nevertheless, shared co-located experiences with multiple users are becoming more common. Making participants aware of each other and facilitating interaction between them in such virtual environments can be quite challenging, particularly considering the technical limitations that still exist despite the advent of new technologies. This paper presents a few different animation strategies based on participants' physical movements in three mixed-reality installations.", "fno": "08587269", "keywords": [ "Augmented Reality", "Computer Animation", "Co Located Playful Mixed Reality Installations", "Virtual Reality", "Augmented Reality", "Animation Strategies", "Virtual Environments", "Shared Co Located Experiences", "Immersive Spatial Experience", "Artistic Installations", "Animating Participants", "Animation", "Visualization", "Tracking", "Avatars", "Games", "Headphones", "Mixed Reality", "Co Located Interaction", "Animation", "Games" ], "authors": [ { "affiliation": "Department of Digital Media, UAS Upper Austria", "fullName": "Jürgen Hagler", "givenName": "Jürgen", "surname": "Hagler", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Digital Media, UAS Upper Austria", "fullName": "Michael Lankes", "givenName": "Michael", "surname": "Lankes", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Digital Media, UAS Upper Austria", "fullName": "Jeremiah Diephuis", "givenName": "Jeremiah", "surname": "Diephuis", "__typename": "ArticleAuthorType" } ], "idPrefix": 
"anivae", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-03-01T00:00:00", "pubType": "proceedings", "pages": "&#xa;1&#xa;-&#xa;4&#xa;", "year": "2018", "issn": null, "isbn": "978-1-5386-6511-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08587266", "articleId": "17D45VsBTU9", "__typename": "AdjacentArticleType" }, "next": { "fno": "08587271", "articleId": "17D45WwsQ7y", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/svr/2017/3588/0/3588a146", "title": "Assessing the Experience of Immersion in Electronic Games", "doi": null, "abstractUrl": "/proceedings-article/svr/2017/3588a146/12OmNApu5G6", "parentPublication": { "id": "proceedings/svr/2017/3588/0", "title": "2017 19th Symposium on Virtual and Augmented Reality (SVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2016/3641/0/3641a100", "title": "TactileVR: Integrating Physical Toys into Learn and Play Virtual Reality Experiences", "doi": null, "abstractUrl": "/proceedings-article/ismar/2016/3641a100/12OmNvFHfKP", "parentPublication": { "id": "proceedings/ismar/2016/3641/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892258", "title": "All are welcome: Using VR ethnography to explore harassment behavior in immersive social virtual reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892258/12OmNx0A7Fw", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08643417", "title": "Not Alone Here?! 
Scalability and User Experience of Embodied Ambient Crowds in Distributed Social Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2019/05/08643417/18K0qdWS7xm", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09744001", "title": "Influence of user posture and virtual exercise on impression of locomotion during VR observation", "doi": null, "abstractUrl": "/journal/tg/5555/01/09744001/1C8BFV420lq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mass/2022/7180/0/718000a728", "title": "Virtual Reality-Based Gymnastics Visualization Using Real-Time Motion Capture Suit", "doi": null, "abstractUrl": "/proceedings-article/mass/2022/718000a728/1JeEp76ujg4", "parentPublication": { "id": "proceedings/mass/2022/7180/0", "title": "2022 IEEE 19th International Conference on Mobile Ad Hoc and Smart Systems (MASS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798159", "title": "Towards an Affordable Virtual Reality Solution for Cardiopulmonary Resuscitation Training", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798159/1cJ0OTTPhdu", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797787", "title": "The Effect of Hand Size and Interaction Modality on the Virtual Hand Illusion", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797787/1cJ179JUrPa", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual 
Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sive/2017/0459/0/07938144", "title": "Creating immersive and aesthetic auditory spaces in virtual reality", "doi": null, "abstractUrl": "/proceedings-article/sive/2017/07938144/1h0L85rWBYA", "parentPublication": { "id": "proceedings/sive/2017/0459/0", "title": "2017 IEEE 3rd VR Workshop on Sonic Interactions for Virtual Environments (SIVE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/svr/2020/9231/0/923100a351", "title": "Prototyping Virtual Reality Interactions in Medical Simulation Employing Speech Recognition", "doi": null, "abstractUrl": "/proceedings-article/svr/2020/923100a351/1oZBzLnFAc0", "parentPublication": { "id": "proceedings/svr/2020/9231/0", "title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cI6akLvAuQ", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cJ0MR4xjWg", "doi": "10.1109/VR.2019.8798122", "title": "An Initial Investigation into Stereotypical Influences on Implicit Racial Bias and Embodied Avatars", "normalizedTitle": "An Initial Investigation into Stereotypical Influences on Implicit Racial Bias and Embodied Avatars", "abstract": "In this paper, we present an initial study to investigate the effects stereotypical settings and avatar appearance of embodied avatars on a user's implicit racial bias. Literature demonstrates the effects embodied avatars can have on a users biases, both implicit and explicit. These shifts in bias and behavior could be caused by the avatars appearance or the stereotypical environment. Few studies have investigated the presence of stereotypical triggers and avatar representation in a learning, game-like environment. With virtual reality entertainment and training simulations becoming popular it is necessary to better understand the effects avatars can have on our behavior, perception, and biases. This study will investigate the potential effects of embodied avatars reinforcing a user's implicit racial biases.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we present an initial study to investigate the effects stereotypical settings and avatar appearance of embodied avatars on a user's implicit racial bias. Literature demonstrates the effects embodied avatars can have on a users biases, both implicit and explicit. These shifts in bias and behavior could be caused by the avatars appearance or the stereotypical environment. Few studies have investigated the presence of stereotypical triggers and avatar representation in a learning, game-like environment. 
With virtual reality entertainment and training simulations becoming popular it is necessary to better understand the effects avatars can have on our behavior, perception, and biases. This study will investigate the potential effects of embodied avatars reinforcing a user's implicit racial biases.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we present an initial study to investigate the effects stereotypical settings and avatar appearance of embodied avatars on a user's implicit racial bias. Literature demonstrates the effects embodied avatars can have on a users biases, both implicit and explicit. These shifts in bias and behavior could be caused by the avatars appearance or the stereotypical environment. Few studies have investigated the presence of stereotypical triggers and avatar representation in a learning, game-like environment. With virtual reality entertainment and training simulations becoming popular it is necessary to better understand the effects avatars can have on our behavior, perception, and biases. 
This study will investigate the potential effects of embodied avatars reinforcing a user's implicit racial biases.", "fno": "08798122", "keywords": [ "Avatars", "Computer Based Training", "Entertainment", "Serious Games Computing", "Embodied Avatars", "Stereotypical Influences", "Implicit Racial Bias", "Stereotypical Environment", "Stereotypical Triggers", "Avatar Representation", "Learning", "Game Like Environment", "Virtual Reality Entertainment", "Training Simulations", "Avatars", "Atmospheric Measurements", "Particle Measurements", "Clothing", "Task Analysis", "Training", "Embodied Virtual Avatars", "Implicit Racial Bias", "Social Good" ], "authors": [ { "affiliation": "Clemson University", "fullName": "Divine Maloney", "givenName": "Divine", "surname": "Maloney", "__typename": "ArticleAuthorType" }, { "affiliation": "Clemson University", "fullName": "Andrew Robb", "givenName": "Andrew", "surname": "Robb", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-03-01T00:00:00", "pubType": "proceedings", "pages": "1074-1075", "year": "2019", "issn": null, "isbn": "978-1-7281-1377-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08797748", "articleId": "1cJ17GWH4f6", "__typename": "AdjacentArticleType" }, "next": { "fno": "08798077", "articleId": "1cJ17P8dQOs", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iv/2010/7846/0/05571304", "title": "Associating Avatars with Musical Genres", "doi": null, "abstractUrl": "/proceedings-article/iv/2010/05571304/12OmNwswg1d", "parentPublication": { "id": "proceedings/iv/2010/7846/0", "title": "2010 14th International Conference Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/acii/2017/0563/0/08273657", "title": "Avatar and participant gender differences in the perception of uncanniness of virtual humans", "doi": null, "abstractUrl": "/proceedings-article/acii/2017/08273657/12OmNzZmZBE", "parentPublication": { "id": "proceedings/acii/2017/0563/0", "title": "2017 Seventh International Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fie/2018/1174/0/08659175", "title": "Do Great Minds Think Alike? : Racial/Ethnic and Gender Differences in Mindset of Undergraduate Engineering Students", "doi": null, "abstractUrl": "/proceedings-article/fie/2018/08659175/18j9jm5E6GI", "parentPublication": { "id": "proceedings/fie/2018/1174/0", "title": "2018 IEEE Frontiers in Education Conference (FIE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798008", "title": "[DC] Embodied Virtual Avatars and Potential Negative Effects on Implicit Racial Bias", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798008/1cJ0WBlYR7G", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797926", "title": "Ethical Concerns of the Use of Virtual Avatars in Consumer Entertainment", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797926/1cJ1gv7LjFK", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090416", "title": "Shooter Bias in Virtual Reality: The Effect of Avatar Race and Socioeconomic Status on Shooting Decisions", "doi": null, 
"abstractUrl": "/proceedings-article/vrw/2020/09090416/1jIxANOupNK", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090457", "title": "Affective Embodiment: The effect of avatar appearance and posture representation on emotions in VR", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090457/1jIxjXwO4HS", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09382876", "title": "Evidence of Racial Bias Using Immersive Virtual Reality: Analysis of Head and Hand Motions During Shooting Decisions", "doi": null, "abstractUrl": "/journal/tg/2021/05/09382876/1saZsrqdHJm", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/12/09495106", "title": "A Wheelchair Locomotion Interface in a VR Disability Simulation Reduces Implicit Bias", "doi": null, "abstractUrl": "/journal/tg/2022/12/09495106/1vyjtwGIZkQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900f147", "title": "StylePeople: A Generative Model of Fullbody Human Avatars", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900f147/1yeILFPUeE8", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cI6akLvAuQ", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cJ0WBlYR7G", "doi": "10.1109/VR.2019.8798008", "title": "[DC] Embodied Virtual Avatars and Potential Negative Effects on Implicit Racial Bias", "normalizedTitle": "[DC] Embodied Virtual Avatars and Potential Negative Effects on Implicit Racial Bias", "abstract": "Embodied virtual avatars can powerfully affect a user's behavior. Some changes in behavior can be positive, although some can be negative and unknown to the user. If the presence of stereotypical triggers lead to an increase in implicit racial bias a hypothesis could be made that embodying an immersive virtual avatar could negatively effect a user's implicit bias, this would be a serious cause for concern with regard to the recent emergence of consumer virtual reality. Here I explain a pilot study and potential next steps for my research.", "abstracts": [ { "abstractType": "Regular", "content": "Embodied virtual avatars can powerfully affect a user's behavior. Some changes in behavior can be positive, although some can be negative and unknown to the user. If the presence of stereotypical triggers lead to an increase in implicit racial bias a hypothesis could be made that embodying an immersive virtual avatar could negatively effect a user's implicit bias, this would be a serious cause for concern with regard to the recent emergence of consumer virtual reality. Here I explain a pilot study and potential next steps for my research.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Embodied virtual avatars can powerfully affect a user's behavior. Some changes in behavior can be positive, although some can be negative and unknown to the user. 
If the presence of stereotypical triggers lead to an increase in implicit racial bias a hypothesis could be made that embodying an immersive virtual avatar could negatively effect a user's implicit bias, this would be a serious cause for concern with regard to the recent emergence of consumer virtual reality. Here I explain a pilot study and potential next steps for my research.", "fno": "08798008", "keywords": [ "Avatars", "Social Sciences Computing", "Implicit Racial Bias", "Stereotypical Triggers", "Immersive Virtual Avatar", "Implicit Bias", "Consumer Virtual Reality", "Embodied Virtual Avatars", "Avatars", "Games", "Solid Modeling", "Load Modeling", "Weapons", "Skin", "Embodied Virtual Avatars", "Implicit Racial Bias", "Social Good" ], "authors": [ { "affiliation": null, "fullName": "Divine Maloney", "givenName": "Divine", "surname": "Maloney", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-03-01T00:00:00", "pubType": "proceedings", "pages": "1373-1374", "year": "2019", "issn": null, "isbn": "978-1-7281-1377-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08798086", "articleId": "1cJ15bXZE9a", "__typename": "AdjacentArticleType" }, "next": { "fno": "08798256", "articleId": "1cJ0W1LjdW8", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2014/2871/0/06802113", "title": "Automatic acquisition and animation of virtual avatars", "doi": null, "abstractUrl": "/proceedings-article/vr/2014/06802113/12OmNCeaQ1Z", "parentPublication": { "id": "proceedings/vr/2014/2871/0", "title": "2014 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2013/5048/0/5048a724", "title": "Multimodal Affect Recognition in Virtual Worlds: 
Avatars Mirroring User's Affect", "doi": null, "abstractUrl": "/proceedings-article/acii/2013/5048a724/12OmNzahbSm", "parentPublication": { "id": "proceedings/acii/2013/5048/0", "title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2022/0915/0/091500d202", "title": "Measuring Hidden Bias within Face Recognition via Racial Phenotypes", "doi": null, "abstractUrl": "/proceedings-article/wacv/2022/091500d202/1B12FxwJfcA", "parentPublication": { "id": "proceedings/wacv/2022/0915/0", "title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600n3440", "title": "Quantifying Societal Bias Amplification in Image Captioning", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600n3440/1H1lQDE7BiE", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798122", "title": "An Initial Investigation into Stereotypical Influences on Implicit Racial Bias and Embodied Avatars", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798122/1cJ0MR4xjWg", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797679", "title": "Shooter Bias and Socioeconomic Status in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797679/1cJ187OlfvG", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual 
Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/06/08928535", "title": "VR Disability Simulation Reduces Implicit Bias Towards Persons With Disabilities", "doi": null, "abstractUrl": "/journal/tg/2021/06/08928535/1fEi0BsQqBy", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090416", "title": "Shooter Bias in Virtual Reality: The Effect of Avatar Race and Socioeconomic Status on Shooting Decisions", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090416/1jIxANOupNK", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09382876", "title": "Evidence of Racial Bias Using Immersive Virtual Reality: Analysis of Head and Hand Motions During Shooting Decisions", "doi": null, "abstractUrl": "/journal/tg/2021/05/09382876/1saZsrqdHJm", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/12/09495106", "title": "A Wheelchair Locomotion Interface in a VR Disability Simulation Reduces Implicit Bias", "doi": null, "abstractUrl": "/journal/tg/2022/12/09495106/1vyjtwGIZkQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1jIxhEnA8IE", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "acronym": "vrw", "groupId": "1836626", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1jIxyeqrhrG", "doi": "10.1109/VRW50115.2020.00124", "title": "[DC] The Impact of Social Interactions on an Embodied Individual&#x2019;s Self-perception in Virtual Environments", "normalizedTitle": "[DC] The Impact of Social Interactions on an Embodied Individual’s Self-perception in Virtual Environments", "abstract": "In shared immersive virtual reality, users can interact with other participants and experience them as being present in the environment. Thereby different aspects of the respective interaction partners can have an impact on the perceived quality of the communication and possibly also the self-perception of an embodied user. This paper describes various factors the author aims to investigate during his doctoral studies. As the research area of embodied social interactions is broad, relevant factors and concrete research questions have been identified to investigate how social contact with one or multiple other persons may affect one&#x2019;s self-perception, behavior and her or his relationship with the others while being embodied in a virtual environment.", "abstracts": [ { "abstractType": "Regular", "content": "In shared immersive virtual reality, users can interact with other participants and experience them as being present in the environment. Thereby different aspects of the respective interaction partners can have an impact on the perceived quality of the communication and possibly also the self-perception of an embodied user. This paper describes various factors the author aims to investigate during his doctoral studies. 
As the research area of embodied social interactions is broad, relevant factors and concrete research questions have been identified to investigate how social contact with one or multiple other persons may affect one&#x2019;s self-perception, behavior and her or his relationship with the others while being embodied in a virtual environment.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In shared immersive virtual reality, users can interact with other participants and experience them as being present in the environment. Thereby different aspects of the respective interaction partners can have an impact on the perceived quality of the communication and possibly also the self-perception of an embodied user. This paper describes various factors the author aims to investigate during his doctoral studies. As the research area of embodied social interactions is broad, relevant factors and concrete research questions have been identified to investigate how social contact with one or multiple other persons may affect one’s self-perception, behavior and her or his relationship with the others while being embodied in a virtual environment.", "fno": "09090624", "keywords": [ "Avatars", "Virtual Environments", "Medical Treatment", "Obesity", "Conferences", "Human Centered Computing", "Human Computer Interaction HCI", "Interaction Paradigms", "Virtual Reality", "Human Centered Computing", "Human Computer Interaction HCI", "Interaction Paradigms", "Mixed Augmented Reality", "Human Centered Computing", "Human Computer Interaction HCI", "Interaction Paradigms", "Collaborative Interaction" ], "authors": [ { "affiliation": "Julius-Maximilians-Universitat Würzburg,Human-Computer Interaction,Germany", "fullName": "David Mal", "givenName": "David", "surname": "Mal", "__typename": "ArticleAuthorType" } ], "idPrefix": "vrw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-03-01T00:00:00", "pubType": 
"proceedings", "pages": "545-546", "year": "2020", "issn": null, "isbn": "978-1-7281-6532-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09090417", "articleId": "1jIxqNN9Xqw", "__typename": "AdjacentArticleType" }, "next": { "fno": "09090547", "articleId": "1jIxw8zwtbO", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/passat-socialcom/2012/5638/0/06406356", "title": "Social and Emotional Turn Taking for Embodied Conversational Agents", "doi": null, "abstractUrl": "/proceedings-article/passat-socialcom/2012/06406356/12OmNx5pj2h", "parentPublication": { "id": "proceedings/passat-socialcom/2012/5638/0", "title": "2012 International Conference on Privacy, Security, Risk and Trust (PASSAT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wi-iat/2011/4513/2/4513b046", "title": "Scalable Perception for BDI-Agents Embodied in Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/wi-iat/2011/4513b046/12OmNz5JBQk", "parentPublication": { "id": "proceedings/wi-iat/2011/4513/2", "title": "Web Intelligence and Intelligent Agent Technology, IEEE/WIC/ACM International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798348", "title": "Individual Differences in Embodied Distance Estimation in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798348/1cJ0H4fRjBS", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798122", "title": "An Initial Investigation into Stereotypical Influences on Implicit Racial Bias and Embodied Avatars", "doi": 
null, "abstractUrl": "/proceedings-article/vr/2019/08798122/1cJ0MR4xjWg", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798008", "title": "[DC] Embodied Virtual Avatars and Potential Negative Effects on Implicit Racial Bias", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798008/1cJ0WBlYR7G", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hcc/2019/4125/0/412500a046", "title": "User-Aware Shared Perception for Embodied Agents", "doi": null, "abstractUrl": "/proceedings-article/hcc/2019/412500a046/1grQ2Ny6Xbq", "parentPublication": { "id": "proceedings/hcc/2019/4125/0", "title": "2019 IEEE International Conference on Humanized Computing and Communication (HCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300g652", "title": "Embodied Question Answering in Photorealistic Environments With Point Cloud Perception", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300g652/1gyrqHbi7cY", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a462", "title": "Body Weight Perception of Females using Photorealistic Avatars in Virtual and Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a462/1pysu9tPcGc", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented 
Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a065", "title": "The Embodiment of Photorealistic Avatars Influences Female Body Weight Perception in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2021/255600a065/1tuAAOZpdoQ", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a788", "title": "Revisiting Distance Perception with Scaled Embodied Cues in Social Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2021/255600a788/1tuAHZj29Q4", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1tuAeQeDJja", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1tuAHZj29Q4", "doi": "10.1109/VR50410.2021.00106", "title": "Revisiting Distance Perception with Scaled Embodied Cues in Social Virtual Reality", "normalizedTitle": "Revisiting Distance Perception with Scaled Embodied Cues in Social Virtual Reality", "abstract": "Previous research on distance estimation in virtual reality (VR) has well established that even for geometrically accurate virtual objects and environments users tend to systematically mis-estimate distances. This has implications for Social VR, where it introduces variables in personal space and proxemics behavior that change social behaviors compared to the real world. One yet unexplored factor is related to the trend that avatars' embodied cues in Social VR are often scaled, e.g., by making one's head bigger or one's voice louder, to make social cues more pronounced over longer distances. In this paper we investigate how the perception of avatar distance is changed based on two means for scaling embodied social cues: visual head scale and verbal volume scale. We conducted a human-subject study employing a mixed factorial design with two Social VR avatar representations (full-body, head-only) as a between factor as well as three visual head scales and three verbal volume scales (up-scaled, accurate, down-scaled) as within factors. For three distances from social to far-public space, we found that visual head scale had a significant effect on distance judgments and should be tuned for Social VR, while conflicting verbal volume scales did not, indicating that voices can be scaled in Social VR without immediate repercussions on spatial estimates. 
We discuss the interactions between the factors and implications for Social VR.", "abstracts": [ { "abstractType": "Regular", "content": "Previous research on distance estimation in virtual reality (VR) has well established that even for geometrically accurate virtual objects and environments users tend to systematically mis-estimate distances. This has implications for Social VR, where it introduces variables in personal space and proxemics behavior that change social behaviors compared to the real world. One yet unexplored factor is related to the trend that avatars' embodied cues in Social VR are often scaled, e.g., by making one's head bigger or one's voice louder, to make social cues more pronounced over longer distances. In this paper we investigate how the perception of avatar distance is changed based on two means for scaling embodied social cues: visual head scale and verbal volume scale. We conducted a human-subject study employing a mixed factorial design with two Social VR avatar representations (full-body, head-only) as a between factor as well as three visual head scales and three verbal volume scales (up-scaled, accurate, down-scaled) as within factors. For three distances from social to far-public space, we found that visual head scale had a significant effect on distance judgments and should be tuned for Social VR, while conflicting verbal volume scales did not, indicating that voices can be scaled in Social VR without immediate repercussions on spatial estimates. We discuss the interactions between the factors and implications for Social VR.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Previous research on distance estimation in virtual reality (VR) has well established that even for geometrically accurate virtual objects and environments users tend to systematically mis-estimate distances. 
This has implications for Social VR, where it introduces variables in personal space and proxemics behavior that change social behaviors compared to the real world. One yet unexplored factor is related to the trend that avatars' embodied cues in Social VR are often scaled, e.g., by making one's head bigger or one's voice louder, to make social cues more pronounced over longer distances. In this paper we investigate how the perception of avatar distance is changed based on two means for scaling embodied social cues: visual head scale and verbal volume scale. We conducted a human-subject study employing a mixed factorial design with two Social VR avatar representations (full-body, head-only) as a between factor as well as three visual head scales and three verbal volume scales (up-scaled, accurate, down-scaled) as within factors. For three distances from social to far-public space, we found that visual head scale had a significant effect on distance judgments and should be tuned for Social VR, while conflicting verbal volume scales did not, indicating that voices can be scaled in Social VR without immediate repercussions on spatial estimates. 
We discuss the interactions between the factors and implications for Social VR.", "fno": "255600a788", "keywords": [ "Avatars", "Distance Perception", "Scaled Embodied Cues", "Social Virtual Reality", "Distance Estimation", "Geometrically Accurate Virtual Objects", "Environments Users", "Mis Estimate Distances", "Social Behaviors", "Avatars", "Social Cues", "Visual Head Scale", "Verbal Volume Scale", "Distance Judgments", "Social VR Avatar Representations", "Visualization", "Three Dimensional Displays", "Avatars", "Design Methodology", "Estimation", "Aerospace Electronics", "User Interfaces", "Computing Methodologies Computer Graphics Graphics Systems And Interfaces Virtual Reality", "Human Centered Computing Human Computer Interaction HCI Interaction Paradigms Virtual Reality", "Human Centered Computing Human Computer Interaction HCI HCI Design And Evaluation Methods User Studies" ], "authors": [ { "affiliation": "University of Central Florida", "fullName": "Zubin Choudhary", "givenName": "Zubin", "surname": "Choudhary", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Central Florida", "fullName": "Matthew Gottsacker", "givenName": "Matthew", "surname": "Gottsacker", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Central Florida", "fullName": "Kangsoo Kim", "givenName": "Kangsoo", "surname": "Kim", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Central Florida", "fullName": "Ryan Schubert", "givenName": "Ryan", "surname": "Schubert", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Utah", "fullName": "Jeanine Stefanucci", "givenName": "Jeanine", "surname": "Stefanucci", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Central Florida", "fullName": "Gerd Bruder", "givenName": "Gerd", "surname": "Bruder", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Central Florida", "fullName": "Gregory F. 
Welch", "givenName": "Gregory F.", "surname": "Welch", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-03-01T00:00:00", "pubType": "proceedings", "pages": "788-797", "year": "2021", "issn": null, "isbn": "978-1-6654-1838-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "255600a778", "articleId": "1tuBngWRAC4", "__typename": "AdjacentArticleType" }, "next": { "fno": "255600a798", "articleId": "1tuAAF9peSc", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2016/0836/0/07504761", "title": "Avatar realism and social interaction quality in virtual reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504761/12OmNzdoMvk", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2018/04/07867778", "title": "Gaze-Sensitive Virtual Reality Based Social Communication Platform for Individuals with Autism", "doi": null, "abstractUrl": "/journal/ta/2018/04/07867778/17D45XwUAMY", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a552", "title": "Persuasive Vibrations: Effects of Speech-Based Vibrations on Persuasion, Leadership, and Co-Presence During Verbal Communication in VR", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a552/1MNgYjAysYU", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/vr/2023/4815/0/481500a571", "title": "Exploring the Social Influence of Virtual Humans Unintentionally Conveying Conflicting Emotions", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a571/1MNgnShKE7e", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798348", "title": "Individual Differences in Embodied Distance Estimation in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798348/1cJ0H4fRjBS", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089620", "title": "Virtual Big Heads: Analysis of Human Perception and Comfort of Head Scales in Social Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089620/1jIxaTvTkm4", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090445", "title": "It Is Complicated: Interacting with Children in Social Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090445/1jIxz87dXEs", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2020/7463/0/746300a068", "title": "Verbal Mimicry Predicts Social Distance and Social Attraction to an Outgroup Member in Virtual Reality", "doi": null, "abstractUrl": 
"/proceedings-article/aivr/2020/746300a068/1qpzC44fheg", "parentPublication": { "id": "proceedings/aivr/2020/7463/0", "title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a484", "title": "Personal Space Evaluation and Protection in Social VR", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a484/1tnXe7x7IGI", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a715", "title": "[DC] Privacy in VR: Empowering Users with Emotional Privacy from Verbal and Non-verbal Behavior of Their Avatars", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a715/1tnXsX6EMBa", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwHyZZX", "title": "2013 IEEE Frontiers in Education Conference (FIE)", "acronym": "fie", "groupId": "1000297", "volume": "0", "displayVolume": "0", "year": "2013", "__typename": "ProceedingType" }, "article": { "id": "12OmNvm6VGu", "doi": "10.1109/FIE.2013.6684814", "title": "Interactive sketching in multi-touch digital books. A prototype for technical graphics", "normalizedTitle": "Interactive sketching in multi-touch digital books. A prototype for technical graphics", "abstract": "In this paper, we present a functional prototype of an interactive multi-touch book with drawing capabilities, intended to enhance the understanding of engineering graphics concepts and improve visualization skills. Our multi-touch book combines textual elements with rich media content and interactive exercises to allow students to practice technical sketching in an environment that simulates traditional paper-based drawings and tools. Additionally, finished drawings can be submitted to the instructor via email directly from the digital book, which facilitates management tasks. A comparative study of traditional and digital sketching using our prototype was conducted with a small group of participants to evaluate the effectiveness of the tool. Preliminary results show positive reactions and acceptance.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we present a functional prototype of an interactive multi-touch book with drawing capabilities, intended to enhance the understanding of engineering graphics concepts and improve visualization skills. Our multi-touch book combines textual elements with rich media content and interactive exercises to allow students to practice technical sketching in an environment that simulates traditional paper-based drawings and tools. Additionally, finished drawings can be submitted to the instructor via email directly from the digital book, which facilitates management tasks. 
A comparative study of traditional and digital sketching using our prototype was conducted with a small group of participants to evaluate the effectiveness of the tool. Preliminary results show positive reactions and acceptance.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we present a functional prototype of an interactive multi-touch book with drawing capabilities, intended to enhance the understanding of engineering graphics concepts and improve visualization skills. Our multi-touch book combines textual elements with rich media content and interactive exercises to allow students to practice technical sketching in an environment that simulates traditional paper-based drawings and tools. Additionally, finished drawings can be submitted to the instructor via email directly from the digital book, which facilitates management tasks. A comparative study of traditional and digital sketching using our prototype was conducted with a small group of participants to evaluate the effectiveness of the tool. Preliminary results show positive reactions and acceptance.", "fno": "06684814", "keywords": [ "Electronic Publishing", "Prototypes", "Multimedia Communication", "Visualization", "Tablet Computers", "Education", "Mobile Learning", "Multi Touch Book", "Virtual Sketching", "Interactive Book" ], "authors": [ { "affiliation": "Dept. de Expresion Grafica en Arquitectura e Ing., Univ. de La Laguna, La Laguna, Spain", "fullName": "Jorge de la Torre", "givenName": "Jorge", "surname": "de la Torre", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. de Expresion Grafica en Arquitectura e Ing., Univ. de La Laguna, La Laguna, Spain", "fullName": "Jose Luis Saorin", "givenName": "Jose Luis", "surname": "Saorin", "__typename": "ArticleAuthorType" }, { "affiliation": "Inst. de Investig. en Bioingenieria y Tecnol. Orientada, al Ser Humano (I3BH), Univ. Politec. 
de Valencia, Valencia, Spain", "fullName": "Manuel Contero", "givenName": "Manuel", "surname": "Contero", "__typename": "ArticleAuthorType" }, { "affiliation": "Eng. Design Graphics, Texas A&M Univ., College Station, TX, USA", "fullName": "Jorge Dorribo-Camba", "givenName": "Jorge", "surname": "Dorribo-Camba", "__typename": "ArticleAuthorType" } ], "idPrefix": "fie", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2013-10-01T00:00:00", "pubType": "proceedings", "pages": "190-194", "year": "2013", "issn": "0190-5848", "isbn": "978-1-4673-5261-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06684813", "articleId": "12OmNxd4tzq", "__typename": "AdjacentArticleType" }, "next": { "fno": "06684815", "articleId": "12OmNznkJYz", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icicta/2015/7644/0/7644a989", "title": "The Interactive Design of the E-Books for Children in the \"Screen Reader Age\"", "doi": null, "abstractUrl": "/proceedings-article/icicta/2015/7644a989/12OmNwDACsg", "parentPublication": { "id": "proceedings/icicta/2015/7644/0", "title": "2015 8th International Conference on Intelligent Computation Technology and Automation (ICICTA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icalt/2013/5009/0/5009a516", "title": "The Roles of Electronic Books in the Transformation of Learning and Instruction", "doi": null, "abstractUrl": "/proceedings-article/icalt/2013/5009a516/12OmNwcl7Bv", "parentPublication": { "id": "proceedings/icalt/2013/5009/0", "title": "2013 IEEE 13th International Conference on Advanced Learning Technologies (ICALT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2010/7029/0/05543514", "title": "An integrated image and 
sketching environment for archaeological sites", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2010/05543514/12OmNwnYG1F", "parentPublication": { "id": "proceedings/cvprw/2010/7029/0", "title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/culture-computing/2013/5047/0/5047a220", "title": "For Digitization of Photographic Books", "doi": null, "abstractUrl": "/proceedings-article/culture-computing/2013/5047a220/12OmNxGSlYT", "parentPublication": { "id": "proceedings/culture-computing/2013/5047/0", "title": "2013 International Conference on Culture and Computing (Culture Computing)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cts/2016/2300/0/07871004", "title": "Sketching Gesture-Based Applications in a Collaborative Working Environment with Wall-Sized Displays", "doi": null, "abstractUrl": "/proceedings-article/cts/2016/07871004/12OmNxbEtOu", "parentPublication": { "id": "proceedings/cts/2016/2300/0", "title": "2016 International Conference on Collaboration Technologies and Systems (CTS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icde/2014/2555/0/06816756", "title": "dbTouch in action database kernels for touch-based data exploration", "doi": null, "abstractUrl": "/proceedings-article/icde/2014/06816756/12OmNzEmFEu", "parentPublication": { "id": "proceedings/icde/2014/2555/0", "title": "2014 IEEE 30th International Conference on Data Engineering (ICDE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/chase/2018/5725/0/572501a093", "title": "Sketching with a Purpose: Moving from Supporting Modeling to Supporting Software Engineering Activities", "doi": null, "abstractUrl": "/proceedings-article/chase/2018/572501a093/13bd1gJ1v0T", 
"parentPublication": { "id": "proceedings/chase/2018/5725/0", "title": "2018 IEEE/ACM 11th International Workshop on Cooperative and Human Aspects of Software Engineering (CHASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/an/2018/04/08506443", "title": "Publishing a Computer Graphics Book With Prototype Desktop Publishing Tools", "doi": null, "abstractUrl": "/magazine/an/2018/04/08506443/17D45Xh13tX", "parentPublication": { "id": "mags/an", "title": "IEEE Annals of the History of Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sbgames/2020/8432/0/843200a154", "title": "Sketch-Based Interaction for Planning-Based Interactive Storytelling", "doi": null, "abstractUrl": "/proceedings-article/sbgames/2020/843200a154/1pQIKYRlYTm", "parentPublication": { "id": "proceedings/sbgames/2020/8432/0", "title": "2020 19th Brazilian Symposium on Computer Games and Digital Entertainment (SBGames)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csci/2020/7624/0/762400b130", "title": "&#x201C;Touch a Paper&#x201D; System Design for Reading Utilizing Physical Touch", "doi": null, "abstractUrl": "/proceedings-article/csci/2020/762400b130/1uGZ58qa0Rq", "parentPublication": { "id": "proceedings/csci/2020/7624/0", "title": "2020 International Conference on Computational Science and Computational Intelligence (CSCI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1qRNrlo577W", "title": "2020 IEEE Visualization Conference (VIS)", "acronym": "vis", "groupId": "1001944", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1qRNXFy3oac", "doi": "10.1109/VIS47514.2020.00036", "title": "Representing Real-Time Multi-User Collaboration in Visualizations", "normalizedTitle": "Representing Real-Time Multi-User Collaboration in Visualizations", "abstract": "Establishing common ground and maintaining shared awareness amongst participants is a key challenge in collaborative visualization. For real-time collaboration, existing work has primarily focused on synchronizing constituent visualizations - an approach that makes it difficult for users to work independently, or selectively attend to their collaborators' activity. To address this gap, we introduce a design space for representing synchronous multi-user collaboration in visualizations defined by two orthogonal axes: situatedness, or whether collaborators' interactions are overlaid on or shown outside of a user's view, and specificity, or whether collaborators are depicted through abstract, generic representations or through specific means customized for the given visualization. We populate this de-sign space with a variety of examples including generic and custom synchronized cursors, and user legends that collect these cursors together or reproduce collaborators' views as thumbnails. To build common ground, users can interact with these representations by peeking to take a quick look at a collaborator's view, tracking to follow along with a collaborator in real-time, and forking to independently explore the visualization based on a collaborator's work. We present a reference implementation of a wrapper library that converts interactive Vega-Lite charts into collaborative visualizations. 
We find that our approach affords synchronous collaboration across an expressive range of visual designs and interaction techniques.", "abstracts": [ { "abstractType": "Regular", "content": "Establishing common ground and maintaining shared awareness amongst participants is a key challenge in collaborative visualization. For real-time collaboration, existing work has primarily focused on synchronizing constituent visualizations - an approach that makes it difficult for users to work independently, or selectively attend to their collaborators' activity. To address this gap, we introduce a design space for representing synchronous multi-user collaboration in visualizations defined by two orthogonal axes: situatedness, or whether collaborators' interactions are overlaid on or shown outside of a user's view, and specificity, or whether collaborators are depicted through abstract, generic representations or through specific means customized for the given visualization. We populate this de-sign space with a variety of examples including generic and custom synchronized cursors, and user legends that collect these cursors together or reproduce collaborators' views as thumbnails. To build common ground, users can interact with these representations by peeking to take a quick look at a collaborator's view, tracking to follow along with a collaborator in real-time, and forking to independently explore the visualization based on a collaborator's work. We present a reference implementation of a wrapper library that converts interactive Vega-Lite charts into collaborative visualizations. We find that our approach affords synchronous collaboration across an expressive range of visual designs and interaction techniques.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Establishing common ground and maintaining shared awareness amongst participants is a key challenge in collaborative visualization. 
For real-time collaboration, existing work has primarily focused on synchronizing constituent visualizations - an approach that makes it difficult for users to work independently, or selectively attend to their collaborators' activity. To address this gap, we introduce a design space for representing synchronous multi-user collaboration in visualizations defined by two orthogonal axes: situatedness, or whether collaborators' interactions are overlaid on or shown outside of a user's view, and specificity, or whether collaborators are depicted through abstract, generic representations or through specific means customized for the given visualization. We populate this de-sign space with a variety of examples including generic and custom synchronized cursors, and user legends that collect these cursors together or reproduce collaborators' views as thumbnails. To build common ground, users can interact with these representations by peeking to take a quick look at a collaborator's view, tracking to follow along with a collaborator in real-time, and forking to independently explore the visualization based on a collaborator's work. We present a reference implementation of a wrapper library that converts interactive Vega-Lite charts into collaborative visualizations. 
We find that our approach affords synchronous collaboration across an expressive range of visual designs and interaction techniques.", "fno": "801400a146", "keywords": [ "Data Visualisation", "Groupware", "Shared Awareness", "Collaborative Visualization", "Real Time Collaboration", "Constituent Visualizations", "Collaborators", "Design Space", "Synchronous Multiuser Collaboration", "Abstract Representations", "Generic Representations", "Given Visualization", "Generic Cursors", "Custom Synchronized Cursors", "User Legends", "Visual Designs", "Interaction Techniques", "Real Time Multiuser Collaboration", "Visualization", "Collaboration", "Prototypes", "Switches", "Tools", "Real Time Systems", "Synchronization", "Human Centered Computing", "Visualization", "Visualization Systems And Tools" ], "authors": [ { "affiliation": "MIT CSAIL", "fullName": "Rupayan Neogy", "givenName": "Rupayan", "surname": "Neogy", "__typename": "ArticleAuthorType" }, { "affiliation": "MIT CSAIL", "fullName": "Jonathan Zong", "givenName": "Jonathan", "surname": "Zong", "__typename": "ArticleAuthorType" }, { "affiliation": "MIT CSAIL", "fullName": "Arvind Satyanarayan", "givenName": "Arvind", "surname": "Satyanarayan", "__typename": "ArticleAuthorType" } ], "idPrefix": "vis", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-10-01T00:00:00", "pubType": "proceedings", "pages": "146-150", "year": "2020", "issn": null, "isbn": "978-1-7281-8014-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1qRNR4CBJbG", "name": "pvis202080140-09331317s1-mm_801400a146.zip", "size": "84.5 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pvis202080140-09331317s1-mm_801400a146.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "801400a141", "articleId": "1qRNNrMSIrm", "__typename": "AdjacentArticleType" }, "next": { "fno": "801400a151", "articleId": "1qRNOUZefuw", 
"__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vlhcc/2015/7457/0/07357201", "title": "Codechella: Multi-user program visualizations for real-time tutoring and collaborative learning", "doi": null, "abstractUrl": "/proceedings-article/vlhcc/2015/07357201/12OmNANkoiN", "parentPublication": { "id": "proceedings/vlhcc/2015/7457/0", "title": "2015 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isdea/2013/4893/0/06456001", "title": "The Degree Distribution of Generalized Collaboration Networks with Preferential Attachment", "doi": null, "abstractUrl": "/proceedings-article/isdea/2013/06456001/12OmNvT2pc5", "parentPublication": { "id": "proceedings/isdea/2013/4893/0", "title": "2013 Third International Conference on Intelligent System Design and Engineering Applications (ISDEA 2013)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cic/2016/4607/0/4607a339", "title": "Inferred Awareness to Support Mixed-Activity Collaboration", "doi": null, "abstractUrl": "/proceedings-article/cic/2016/4607a339/12OmNx4Q6Gq", "parentPublication": { "id": "proceedings/cic/2016/4607/0", "title": "2016 IEEE 2nd International Conference on Collaboration and Internet Computing (CIC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cts/2016/2300/0/07871022", "title": "Tele-Board Prototyper - Distributed 3D Modeling in a Web-Based Real-Time Collaboration System", "doi": null, "abstractUrl": "/proceedings-article/cts/2016/07871022/12OmNyuy9RX", "parentPublication": { "id": "proceedings/cts/2016/2300/0", "title": "2016 International Conference on Collaboration Technologies and Systems (CTS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/cloud/2015/7287/0/7287b107", "title": "A Cloud-Based Platform for Supporting Research Collaboration", "doi": null, "abstractUrl": "/proceedings-article/cloud/2015/7287b107/12OmNzcPAsy", "parentPublication": { "id": "proceedings/cloud/2015/7287/0", "title": "2015 IEEE 8th International Conference on Cloud Computing (CLOUD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/01/08017606", "title": "Active Reading of Visualizations", "doi": null, "abstractUrl": "/journal/tg/2018/01/08017606/13rRUyYSWl5", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a022", "title": "Merging Live and Static 360 Panoramas Inside a 3D Scene for Mixed Reality Remote Collaboration", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a022/1gysn0YPLm8", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/ic/2020/06/09194322", "title": "Towards Trust-Aware Collaborative Business Processes: An Approach to Identify Uncertainty", "doi": null, "abstractUrl": "/magazine/ic/2020/06/09194322/1n0E7HwjAYw", "parentPublication": { "id": "mags/ic", "title": "IEEE Internet Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a509", "title": "View Splicing for Effective VR Collaboration", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a509/1pysyoGwmze", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a532", "title": "TeleGate: Immersive Multi-User Collaboration for Mixed Reality 360&#x00B0;Video", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a532/1tnXy7NpnGg", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1qRNrlo577W", "title": "2020 IEEE Visualization Conference (VIS)", "acronym": "vis", "groupId": "1001944", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1qRO3wCZWAE", "doi": "10.1109/VIS47514.2020.00030", "title": "Trrack: A Library for Provenance-Tracking in Web-Based Visualizations", "normalizedTitle": "Trrack: A Library for Provenance-Tracking in Web-Based Visualizations", "abstract": "Provenance-tracking is widely acknowledged as an important feature of visualization systems. By tracking provenance data, visualization designers can provide a wide variety of functionality, ranging from action recovery (undo/redo), reproducibility, collaboration and sharing, to logging in support of quantitative and longitudinal evaluation. However, no widely used library that can provide that functionality is current available. As a consequence, visualization designers either develop ad hoc solutions that are rarely comprehensive, or do not track provenance at all. In this paper, we introduce a web-based software library - Trrack - that is designed for easy integration in existing or future visualization systems. Trrack supports a wide range of use cases, from simple action recovery, to capturing intent and reasoning, and can be used to share states with collaborators and store provenance on a server. Trrack also includes an optional provenance visualization component that supports annotation of states and aggregation of events.", "abstracts": [ { "abstractType": "Regular", "content": "Provenance-tracking is widely acknowledged as an important feature of visualization systems. By tracking provenance data, visualization designers can provide a wide variety of functionality, ranging from action recovery (undo/redo), reproducibility, collaboration and sharing, to logging in support of quantitative and longitudinal evaluation. 
However, no widely used library that can provide that functionality is current available. As a consequence, visualization designers either develop ad hoc solutions that are rarely comprehensive, or do not track provenance at all. In this paper, we introduce a web-based software library - Trrack - that is designed for easy integration in existing or future visualization systems. Trrack supports a wide range of use cases, from simple action recovery, to capturing intent and reasoning, and can be used to share states with collaborators and store provenance on a server. Trrack also includes an optional provenance visualization component that supports annotation of states and aggregation of events.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Provenance-tracking is widely acknowledged as an important feature of visualization systems. By tracking provenance data, visualization designers can provide a wide variety of functionality, ranging from action recovery (undo/redo), reproducibility, collaboration and sharing, to logging in support of quantitative and longitudinal evaluation. However, no widely used library that can provide that functionality is current available. As a consequence, visualization designers either develop ad hoc solutions that are rarely comprehensive, or do not track provenance at all. In this paper, we introduce a web-based software library - Trrack - that is designed for easy integration in existing or future visualization systems. Trrack supports a wide range of use cases, from simple action recovery, to capturing intent and reasoning, and can be used to share states with collaborators and store provenance on a server. 
Trrack also includes an optional provenance visualization component that supports annotation of states and aggregation of events.", "fno": "801400a116", "keywords": [ "Data Integrity", "Data Visualisation", "Internet", "Software Libraries", "Web Based Visualization", "Trrack", "Provenance Visualization", "Web Based Software Library", "Provenance Data Tracking", "Visualization", "Software Libraries", "Annotations", "Data Visualization", "Collaboration", "Tools", "Servers", "Human Centered Computing", "Visualization", "Visualization Systems And Tools" ], "authors": [ { "affiliation": "University of Utah", "fullName": "Zach Cutler", "givenName": "Zach", "surname": "Cutler", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Utah", "fullName": "Kiran Gadhave", "givenName": "Kiran", "surname": "Gadhave", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Utah", "fullName": "Alexander Lex", "givenName": "Alexander", "surname": "Lex", "__typename": "ArticleAuthorType" } ], "idPrefix": "vis", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-10-01T00:00:00", "pubType": "proceedings", "pages": "116-120", "year": "2020", "issn": null, "isbn": "978-1-7281-8014-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "801400a111", "articleId": "1qRNP6eEG52", "__typename": "AdjacentArticleType" }, "next": { "fno": "801400a121", "articleId": "1qROQI8N61O", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/sbgames/2017/4846/0/484601a066", "title": "Capturing Game Telemetry with Provenance", "doi": null, "abstractUrl": "/proceedings-article/sbgames/2017/484601a066/12OmNAqU4Xh", "parentPublication": { "id": "proceedings/sbgames/2017/4846/0", "title": "2017 16th Brazilian Symposium on Computer Games and Digital Entertainment (SBGames)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdcs/2016/1483/0/1483a025", "title": "INSPECTOR: Data Provenance Using Intel Processor Trace (PT)", "doi": null, "abstractUrl": "/proceedings-article/icdcs/2016/1483a025/12OmNy5zsuS", "parentPublication": { "id": "proceedings/icdcs/2016/1483/0", "title": "2016 IEEE 36th International Conference on Distributed Computing Systems (ICDCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ts/2018/02/07909036", "title": "A Templating System to Generate Provenance", "doi": null, "abstractUrl": "/journal/ts/2018/02/07909036/13rRUxNW1Vz", "parentPublication": { "id": "trans/ts", "title": "IEEE Transactions on Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2007/05/c5082", "title": "Provenance for Visualizations: Reproducibility and Beyond", "doi": null, "abstractUrl": "/magazine/cs/2007/05/c5082/13rRUxjyWZt", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/01/08500765", "title": "Enhancing Web-based Analytics Applications through Provenance", "doi": null, "abstractUrl": "/journal/tg/2019/01/08500765/17D45WYQJ6B", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/skg/2018/0441/0/08703970", "title": "A Data Provenance Visualization Approach", "doi": null, "abstractUrl": "/proceedings-article/skg/2018/08703970/19JEc4BmzCw", "parentPublication": { "id": "proceedings/skg/2018/0441/0", "title": "2018 14th International Conference on Semantics, Knowledge and Grids (SKG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/rose/2021/4474/0/447400a067", "title": "Inferred Interactive Controls Through Provenance Tracking of ROS Message Data", "doi": null, "abstractUrl": "/proceedings-article/rose/2021/447400a067/1v6a5mkrzeE", "parentPublication": { "id": "proceedings/rose/2021/4474/0", "title": "2021 IEEE/ACM 3rd International Workshop on Robotics Software Engineering (RoSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dsn/2021/3572/0/357200a363", "title": "FIRestarter: Practical Software Crash Recovery with Targeted Library-level Fault Injection", "doi": null, "abstractUrl": "/proceedings-article/dsn/2021/357200a363/1vQDmKztCso", "parentPublication": { "id": "proceedings/dsn/2021/3572/0", "title": "2021 51st Annual IEEE/IFIP International Conference on Dependable Systems and Networks (DSN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2021/3827/0/382700a021", "title": "Towards a Visual Approach for Representing Analytical Provenance in Exploration Processes", "doi": null, "abstractUrl": "/proceedings-article/iv/2021/382700a021/1y4oID1DpaU", "parentPublication": { "id": "proceedings/iv/2021/3827/0", "title": "2021 25th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxH9X7I", "title": "2009 International Conference on Measuring Technology and Mechatronics Automation", "acronym": "icmtma", "groupId": "1002837", "volume": "2", "displayVolume": "2", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNwDSdoq", "doi": "10.1109/ICMTMA.2009.158", "title": "Finite Element/Infinite Element Method for Acoustic Scattering Problem", "normalizedTitle": "Finite Element/Infinite Element Method for Acoustic Scattering Problem", "abstract": "The acoustic scattering problem is solved and an example is calculated using the finite element/infinite element method. Firstly, the surface velocity is obtained by the incidence acoustic wave and the characteristic of structure. Then the surface pressure is solved using the finite element/infinite element method. Lastly, the frequency of the incidence acoustic wave changes for the error of this method. This problem can be solved by finite element/infinite element method efficiently. Furthermore, the solving error is relatively low in a large frequency range.", "abstracts": [ { "abstractType": "Regular", "content": "The acoustic scattering problem is solved and an example is calculated using the finite element/infinite element method. Firstly, the surface velocity is obtained by the incidence acoustic wave and the characteristic of structure. Then the surface pressure is solved using the finite element/infinite element method. Lastly, the frequency of the incidence acoustic wave changes for the error of this method. This problem can be solved by finite element/infinite element method efficiently. Furthermore, the solving error is relatively low in a large frequency range.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The acoustic scattering problem is solved and an example is calculated using the finite element/infinite element method. 
Firstly, the surface velocity is obtained by the incidence acoustic wave and the characteristic of structure. Then the surface pressure is solved using the finite element/infinite element method. Lastly, the frequency of the incidence acoustic wave changes for the error of this method. This problem can be solved by finite element/infinite element method efficiently. Furthermore, the solving error is relatively low in a large frequency range.", "fno": "3583b810", "keywords": [ "Acoustic Wave Scattering", "Finite Element Analysis", "Surface Acoustic Waves", "Infinite Element Method", "Acoustic Scattering Problem", "Surface Velocity", "Incidence Acoustic Wave", "Surface Pressure", "Finite Element Method", "Finite Element Methods", "Acoustic Scattering", "Surface Acoustic Waves", "Acoustic Waves", "Frequency", "Acoustic Measurements", "Energy Measurement", "Mechatronics", "Automation", "Acoustical Engineering", "Finite Element", "Infinite Element", "Acoustic Scattering Problem", "Kirchhoff Integral" ], "authors": [ { "affiliation": null, "fullName": "Gaoju Song", "givenName": "Gaoju", "surname": "Song", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Energy & Environ. Eng., Zhongyuan Univ. of Technol., Zhengzhou, China", "fullName": "Ruiliang Yang", "givenName": "Ruiliang", "surname": "Yang", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Energy & Environ. Eng., Zhongyuan Univ. of Technol., Zhengzhou, China", "fullName": "Caixia Zhu", "givenName": "Caixia", "surname": "Zhu", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Energy & Environ. Eng., Zhongyuan Univ. 
of Technol., Zhengzhou, China", "fullName": "Xiaowei Fan", "givenName": "Xiaowei", "surname": "Fan", "__typename": "ArticleAuthorType" } ], "idPrefix": "icmtma", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-04-01T00:00:00", "pubType": "proceedings", "pages": "810-812", "year": "2009", "issn": "2157-1473", "isbn": "978-0-7695-3583-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3583b746", "articleId": "12OmNxVlTCi", "__typename": "AdjacentArticleType" }, "next": { "fno": "3583b750", "articleId": "12OmNxecS8Z", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icmtma/2010/3962/1/3962a864", "title": "Coupling of FEM and Exterior/Interior Acoustic Field with BEM and Numerical Simulation of Vibro-Acoustic Response of Elastic Target", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2010/3962a864/12OmNB836RK", "parentPublication": { "id": "proceedings/icmtma/2010/3962/1", "title": "2010 International Conference on Measuring Technology and Mechatronics Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcasia/1997/7901/0/79010611", "title": "Solution of viscoelastic scattering problems in linear acoustics using hp boundary/finite element method", "doi": null, "abstractUrl": "/proceedings-article/hpcasia/1997/79010611/12OmNBAIAPV", "parentPublication": { "id": "proceedings/hpcasia/1997/7901/0", "title": "High Performance Computing and Grid in Asia Pacific Region, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccce/2016/2427/0/2427a516", "title": "Investigation of Acoustic Wavelength Effects on Silicon Compatible Al Doped ZnO SAW Resonator", "doi": null, "abstractUrl": 
"/proceedings-article/iccce/2016/2427a516/12OmNBC8AAB", "parentPublication": { "id": "proceedings/iccce/2016/2427/0", "title": "2016 International Conference on Computer and Communication Engineering (ICCCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acssc/1990/2180/1/00523310", "title": "DETERMINATION OF ACOUSTIC SCATTERING CROSS SECTION BASED ON NEAR-FIELD MEASUREMENTS", "doi": null, "abstractUrl": "/proceedings-article/acssc/1990/00523310/12OmNBrlPyf", "parentPublication": { "id": "proceedings/acssc/1990/2180/2", "title": "1990 Conference Record Twenty-Fourth Asilomar Conference on Signals, Systems and Computers, 1990.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ifita/2009/3600/2/3600b081", "title": "New Calculational Method for Acoustic Reconstructing", "doi": null, "abstractUrl": "/proceedings-article/ifita/2009/3600b081/12OmNCmGNN5", "parentPublication": { "id": "proceedings/ifita/2009/3600/2", "title": "2009 International Forum on Information Technology and Applications (IFITA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2010/4077/1/4077a386", "title": "A Vibro-Acoustic Coupling Analysis of Bus Passenger Compartment Based on the Finite Element Method", "doi": null, "abstractUrl": "/proceedings-article/icicta/2010/4077a386/12OmNviZlty", "parentPublication": { "id": "proceedings/icicta/2010/4077/1", "title": "Intelligent Computation Technology and Automation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2011/4353/2/05750991", "title": "Study on Optimization and Performance of Four-element Acoustic Sensors Array", "doi": null, "abstractUrl": "/proceedings-article/icicta/2011/05750991/12OmNwNOaK8", "parentPublication": { "id": "icicta/2011/4353/2", "title": "Intelligent Computation Technology 
and Automation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icassp/2004/8484/2/01326274", "title": "Performance of Doppler estimation for acoustic sources with atmospheric scattering", "doi": null, "abstractUrl": "/proceedings-article/icassp/2004/01326274/12OmNxV4ivb", "parentPublication": { "id": "proceedings/icassp/2004/8484/2", "title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ssst/1993/3560/0/00522737", "title": "Acceleration of the modal series in the Neumann scattering problem for a hemispherical shell", "doi": null, "abstractUrl": "/proceedings-article/ssst/1993/00522737/12OmNzZEApj", "parentPublication": { "id": "proceedings/ssst/1993/3560/0", "title": "1993 (25th) Southeastern Symposium on System Theory", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2009/3583/2/3583b327", "title": "Simulation and Analysis of the Cabin Typical Structure under Vibro-acoustic Combined Environment", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2009/3583b327/12OmNzmclJs", "parentPublication": { "id": "proceedings/icmtma/2009/3583/2", "title": "2009 International Conference on Measuring Technology and Mechatronics Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNx0A7Jg", "title": "2011 Third International Conference on Measuring Technology and Mechatronics Automation", "acronym": "icmtma", "groupId": "1002837", "volume": "3", "displayVolume": "3", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNxT56zR", "doi": "10.1109/ICMTMA.2011.838", "title": "Underwater Broadband Signal Waveform Fast Prediction Method in Shallow Water with a Thermocline", "normalizedTitle": "Underwater Broadband Signal Waveform Fast Prediction Method in Shallow Water with a Thermocline", "abstract": "For the case of transmission characteristic in shallow water channel, broadband acoustic model have to be considered. A rapid numerical prediction theory of underwater broadband signal waveform is studied in this paper. During building broadband acoustic model in shallow water with a thermo cline, based on beam-displacement ray-mode theory (BDRM), approximate expansion of broadband acoustic model with respect to frequency and model parallelization will be used for rapid and accurate broadband signal waveform prediction. According to compare result, it offers a satisfactory degree of accuracy and the calculating speed has been improved comparing with conventional mode method.", "abstracts": [ { "abstractType": "Regular", "content": "For the case of transmission characteristic in shallow water channel, broadband acoustic model have to be considered. A rapid numerical prediction theory of underwater broadband signal waveform is studied in this paper. During building broadband acoustic model in shallow water with a thermo cline, based on beam-displacement ray-mode theory (BDRM), approximate expansion of broadband acoustic model with respect to frequency and model parallelization will be used for rapid and accurate broadband signal waveform prediction. 
According to compare result, it offers a satisfactory degree of accuracy and the calculating speed has been improved comparing with conventional mode method.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "For the case of transmission characteristic in shallow water channel, broadband acoustic model have to be considered. A rapid numerical prediction theory of underwater broadband signal waveform is studied in this paper. During building broadband acoustic model in shallow water with a thermo cline, based on beam-displacement ray-mode theory (BDRM), approximate expansion of broadband acoustic model with respect to frequency and model parallelization will be used for rapid and accurate broadband signal waveform prediction. According to compare result, it offers a satisfactory degree of accuracy and the calculating speed has been improved comparing with conventional mode method.", "fno": "4296f072", "keywords": [ "Acoustic Propagation", "Broadband Model", "Frequency Domain Expansion", "Parallel Computation" ], "authors": [ { "affiliation": null, "fullName": "Tang Shuai", "givenName": "Tang", "surname": "Shuai", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Da Lianglong", "givenName": "Da", "surname": "Lianglong", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Xie Jun", "givenName": "Xie", "surname": "Jun", "__typename": "ArticleAuthorType" } ], "idPrefix": "icmtma", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-01-01T00:00:00", "pubType": "proceedings", "pages": "1072-1075", "year": "2011", "issn": null, "isbn": "978-0-7695-4296-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4296f068", "articleId": "12OmNCfSqLG", "__typename": "AdjacentArticleType" }, "next": { "fno": "4296f076", "articleId": "12OmNARiM4G", "__typename": "AdjacentArticleType" }, 
"__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icise/2009/3887/0/pid978650", "title": "Echo Waveform Prediction Algorithm for a Target in the Shallow Water Wave-Guide", "doi": null, "abstractUrl": "/proceedings-article/icise/2009/pid978650/12OmNAo45KV", "parentPublication": { "id": "proceedings/icise/2009/3887/0", "title": "Information Science and Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iwcfta/2011/4560/0/4560a203", "title": "A Robust Wide Synchronous Digital Chaotic Communication Scheme in Shallow Water Channel", "doi": null, "abstractUrl": "/proceedings-article/iwcfta/2011/4560a203/12OmNCbCs1k", "parentPublication": { "id": "proceedings/iwcfta/2011/4560/0", "title": "Chaos-Fractals Theories and Applications, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icassp/2002/7402/3/05745260", "title": "Broadband passive range estimation using music", "doi": null, "abstractUrl": "/proceedings-article/icassp/2002/05745260/12OmNwFicT2", "parentPublication": { "id": "proceedings/icassp/2002/7402/3", "title": "Proceedings of International Conference on Acoustics, Speech and Signal Processing (CASSP'02)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccce/2014/7635/0/7635a080", "title": "Underwater Acoustic Noise Characteristics of Shallow Water in Tropical Seas", "doi": null, "abstractUrl": "/proceedings-article/iccce/2014/7635a080/12OmNwlZu4L", "parentPublication": { "id": "proceedings/iccce/2014/7635/0", "title": "2014 International Conference on Computer & Communication Engineering (ICCCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acssc/1993/4120/0/00342352", "title": "Adaptive equalization techniques for interference suppression in shallow 
water acoustic telemetry channels", "doi": null, "abstractUrl": "/proceedings-article/acssc/1993/00342352/12OmNxXCGGi", "parentPublication": { "id": "proceedings/acssc/1993/4120/0", "title": "Proceedings of 27th Asilomar Conference on Signals, Systems and Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acssc/1988/9999/1/00754030", "title": "Broadband Matched Field Processing", "doi": null, "abstractUrl": "/proceedings-article/acssc/1988/00754030/12OmNxecS5S", "parentPublication": { "id": "proceedings/acssc/1988/9999/1", "title": "Twenty-Second Asilomar Conference on Signals, Systems and Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icassp/1988/9999/0/00197205", "title": "Experiments on automatic classification of shallow water acoustic signal sources using two pattern recognition methods", "doi": null, "abstractUrl": "/proceedings-article/icassp/1988/00197205/12OmNyugyZp", "parentPublication": { "id": "proceedings/icassp/1988/9999/0", "title": "ICASSP-88., International Conference on Acoustics, Speech, and Signal Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isspit/2017/4662/0/08388316", "title": "Noise characteristics in extreme shallow water with relation to ship maneuvering parameters", "doi": null, "abstractUrl": "/proceedings-article/isspit/2017/08388316/12OmNywfKyd", "parentPublication": { "id": "proceedings/isspit/2017/4662/0", "title": "2017 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sutc/2010/4049/0/4049a095", "title": "Channel Equalization Based on Data Reuse LMS Algorithm for Shallow Water Acoustic Communication", "doi": null, "abstractUrl": "/proceedings-article/sutc/2010/4049a095/12OmNzIUfM7", "parentPublication": { 
"id": "proceedings/sutc/2010/4049/0", "title": "Sensor Networks, Ubiquitous, and Trustworthy Computing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icedme/2021/3596/0/359600a139", "title": "Design of a sampling device for shallow water sediment", "doi": null, "abstractUrl": "/proceedings-article/icedme/2021/359600a139/1tMPO6Fvub6", "parentPublication": { "id": "proceedings/icedme/2021/3596/0", "title": "2021 4th International Conference on Electron Device and Mechanical Engineering (ICEDME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNs4S8wm", "title": "2010 22nd International Symposium on Computer Architecture and High Performance Computing Workshops", "acronym": "sbac-padw", "groupId": "1800191", "volume": "0", "displayVolume": "0", "year": "2010", "__typename": "ProceedingType" }, "article": { "id": "12OmNxUMHqw", "doi": "10.1109/SBAC-PADW.2010.16", "title": "Performance Evaluation of Optimized Implementations of Finite Difference Method for Wave Propagation Problems on GPU Architecture", "normalizedTitle": "Performance Evaluation of Optimized Implementations of Finite Difference Method for Wave Propagation Problems on GPU Architecture", "abstract": "The scattering of acoustic waves in non-homogeneous media has been of practical interest for the petroleum industry, mainly in the determination of new oil deposits. A family of computational models that represent this phenomenon is based on finite difference methods. The simulation of these phenomena demands a high computational cost. In this work we employ GPU for the development of solvers for a 2D wave propagation problem with finite difference methods. Although there are many related works that use the same implementation presented in this paper, we propose a detailed and novel performance and memory bottleneck analysis for this hardware architecture.", "abstracts": [ { "abstractType": "Regular", "content": "The scattering of acoustic waves in non-homogeneous media has been of practical interest for the petroleum industry, mainly in the determination of new oil deposits. A family of computational models that represent this phenomenon is based on finite difference methods. The simulation of these phenomena demands a high computational cost. In this work we employ GPU for the development of solvers for a 2D wave propagation problem with finite difference methods. 
Although there are many related works that use the same implementation presented in this paper, we propose a detailed and novel performance and memory bottleneck analysis for this hardware architecture.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The scattering of acoustic waves in non-homogeneous media has been of practical interest for the petroleum industry, mainly in the determination of new oil deposits. A family of computational models that represent this phenomenon is based on finite difference methods. The simulation of these phenomena demands a high computational cost. In this work we employ GPU for the development of solvers for a 2D wave propagation problem with finite difference methods. Although there are many related works that use the same implementation presented in this paper, we propose a detailed and novel performance and memory bottleneck analysis for this hardware architecture.", "fno": "4276a007", "keywords": [ "GPU Architecture", "Finite Difference Method", "Wave Propagation Problem" ], "authors": [ { "affiliation": null, "fullName": "Diego Brandão", "givenName": "Diego", "surname": "Brandão", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Marcelo Zamith", "givenName": "Marcelo", "surname": "Zamith", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Esteban Clua", "givenName": "Esteban", "surname": "Clua", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Anselmo Montenegro", "givenName": "Anselmo", "surname": "Montenegro", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "André Bulcão", "givenName": "André", "surname": "Bulcão", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Daniel Madeira", "givenName": "Daniel", "surname": "Madeira", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Mauricio Kischinhevsky", "givenName": "Mauricio", "surname": "Kischinhevsky", "__typename": 
"ArticleAuthorType" }, { "affiliation": null, "fullName": "Regina C.P. Leal-Toledo", "givenName": "Regina C.P.", "surname": "Leal-Toledo", "__typename": "ArticleAuthorType" } ], "idPrefix": "sbac-padw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2010-10-01T00:00:00", "pubType": "proceedings", "pages": "7-12", "year": "2010", "issn": null, "isbn": "978-0-7695-4276-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4276a001", "articleId": "12OmNzaQotZ", "__typename": "AdjacentArticleType" }, "next": { "fno": "4276a013", "articleId": "12OmNB0X8qA", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/pci/2011/4389/0/4389a253", "title": "Higher Order Finite Difference Preconditioned Scheme: A Multithreaded Approach", "doi": null, "abstractUrl": "/proceedings-article/pci/2011/4389a253/12OmNAoDijh", "parentPublication": { "id": "proceedings/pci/2011/4389/0", "title": "2011 15th Panhellenic Conference on Informatics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2017/3050/0/08217957", "title": "Higher order finite difference modeling of cardiac propagation", "doi": null, "abstractUrl": "/proceedings-article/bibm/2017/08217957/12OmNBpVPUS", "parentPublication": { "id": "proceedings/bibm/2017/3050/0", "title": "2017 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ssst/1992/2665/0/00712196", "title": "A Comparative Study of Compact Finite Volume Methods for the 2-D Difussion Equation with Finite Difference ADI and SOR", "doi": null, "abstractUrl": "/proceedings-article/ssst/1992/00712196/12OmNBpVQ97", "parentPublication": { "id": "proceedings/ssst/1992/2665/0", "title": "The 24th 
Southeastern Symposium on System Theory and The 3rd Annual Symposium on Communications, Signal Processing Expert Systems, and ASIC VLSI Design", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cse/2011/4477/0/4477a567", "title": "Seismic Wave Propagation Simulation Using Accelerated Support Operator Rupture Dynamics on Multi-GPU", "doi": null, "abstractUrl": "/proceedings-article/cse/2011/4477a567/12OmNCf1Drg", "parentPublication": { "id": "proceedings/cse/2011/4477/0", "title": "2011 14th IEEE International Conference on Computational Science and Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cse/2008/3193/0/3193a253", "title": "Exploiting Intensive Multithreading for the Efficient Simulation of 3D Seismic Wave Propagation", "doi": null, "abstractUrl": "/proceedings-article/cse/2008/3193a253/12OmNrkBwIg", "parentPublication": { "id": "proceedings/cse/2008/3193/0", "title": "2008 11th IEEE International Conference on Computational Science and Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ewdts/2013/2096/0/06673202", "title": "Conservative finite-difference scheme for the problem of laser pulse propagation in a medium with third-order dispersion", "doi": null, "abstractUrl": "/proceedings-article/ewdts/2013/06673202/12OmNvjgWnP", "parentPublication": { "id": "proceedings/ewdts/2013/2096/0", "title": "2013 11th East-West Design and Test Symposium (EWDTS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ccgrid/2010/4039/0/4039a763", "title": "Asynchronous Communication Schemes for Finite Difference Methods on Multiple GPUs", "doi": null, "abstractUrl": "/proceedings-article/ccgrid/2010/4039a763/12OmNvqEvKJ", "parentPublication": { "id": "proceedings/ccgrid/2010/4039/0", "title": "Cluster Computing and the Grid, IEEE International 
Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fpl/2011/4529/0/4529a006", "title": "Unifying Finite Difference Option-Pricing for Hardware Acceleration", "doi": null, "abstractUrl": "/proceedings-article/fpl/2011/4529a006/12OmNy3RRCU", "parentPublication": { "id": "proceedings/fpl/2011/4529/0", "title": "International Conference on Field Programmable Logic and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icic/2009/3634/3/3634c074", "title": "Local One Dimensional Scheme for Viscous Wave Equations", "doi": null, "abstractUrl": "/proceedings-article/icic/2009/3634c074/12OmNy6Zs2P", "parentPublication": { "id": "proceedings/icic/2009/3634/2", "title": "2009 Second International Conference on Information and Computing Science", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ssst/1993/3560/0/00522823", "title": "A comparative study of compact finite volume methods for the 3-D diffusion equations with finite difference ADI and SOR", "doi": null, "abstractUrl": "/proceedings-article/ssst/1993/00522823/12OmNylsZXN", "parentPublication": { "id": "proceedings/ssst/1993/3560/0", "title": "1993 (25th) Southeastern Symposium on System Theory", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxWcHee", "title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing", "acronym": "icassp", "groupId": "1000002", "volume": "4", "displayVolume": "4", "year": "2004", "__typename": "ProceedingType" }, "article": { "id": "12OmNxcdFWC", "doi": "10.1109/ICASSP.2004.1326754", "title": "Boundary conditions in a multi-dimensional digital waveguide mesh", "normalizedTitle": "Boundary conditions in a multi-dimensional digital waveguide mesh", "abstract": "The digital waveguide mesh is a modeling technique suitable for simulation of wave propagation in an acoustic system. Artificial boundary conditions are constructed for the digital waveguide mesh. Absorbing boundary conditions are evaluated and a new method for adjusting the reflection coefficient at values 0/spl les/r/spl les/1 is introduced. The frequency dependent error level of this method is minimized by the use of a second-order FIR filter.", "abstracts": [ { "abstractType": "Regular", "content": "The digital waveguide mesh is a modeling technique suitable for simulation of wave propagation in an acoustic system. Artificial boundary conditions are constructed for the digital waveguide mesh. Absorbing boundary conditions are evaluated and a new method for adjusting the reflection coefficient at values 0/spl les/r/spl les/1 is introduced. The frequency dependent error level of this method is minimized by the use of a second-order FIR filter.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The digital waveguide mesh is a modeling technique suitable for simulation of wave propagation in an acoustic system. Artificial boundary conditions are constructed for the digital waveguide mesh. Absorbing boundary conditions are evaluated and a new method for adjusting the reflection coefficient at values 0/spl les/r/spl les/1 is introduced. 
The frequency dependent error level of this method is minimized by the use of a second-order FIR filter.", "fno": "01326754", "keywords": [ "Acoustic Wave Reflection", "Acoustic Wave Propagation", "Digital Simulation", "Minimisation", "FIR Filters", "Acoustic Waveguides", "Acoustics", "Multi Dimensional Digital Waveguide Mesh", "Artificial Boundary Conditions", "Acoustic Wave Propagation", "Reflection Coefficient", "Error Minimization", "Second Order FIR Filter", "Acoustic Modeling Technique", "Boundary Conditions", "Acoustic Waveguides", "Acoustic Waves", "Rectangular Waveguides", "Acoustic Reflection", "Solid Modeling", "Instruments", "Frequency", "Difference Equations", "Acoustical Engineering" ], "authors": [ { "affiliation": "Telecommun. Software & Multimedia Lab, Helsinki Univ. of Technol., Finland", "fullName": "A. Kelloniemi", "givenName": "A.", "surname": "Kelloniemi", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "D.T. Murphy", "givenName": "D.T.", "surname": "Murphy", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "L. Savioja", "givenName": "L.", "surname": "Savioja", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "V. 
Valimaki", "givenName": "V.", "surname": "Valimaki", "__typename": "ArticleAuthorType" } ], "idPrefix": "icassp", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2004-01-01T00:00:00", "pubType": "proceedings", "pages": "iv-25-iv-28 vol.4", "year": "2004", "issn": "1520-6149", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "01326753", "articleId": "12OmNxX3utn", "__typename": "AdjacentArticleType" }, "next": { "fno": "01326755", "articleId": "12OmNANkop6", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icassp/1999/5041/2/00759858", "title": "Reduction of the dispersion error in the interpolated digital waveguide mesh using frequency warping", "doi": null, "abstractUrl": "/proceedings-article/icassp/1999/00759858/12OmNBJw9ST", "parentPublication": { "id": "proceedings/icassp/1999/5041/2", "title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpads/1997/8227/0/82270109", "title": "Optimal broadcast in /spl alpha/-port wormhole-routed mesh networks", "doi": null, "abstractUrl": "/proceedings-article/icpads/1997/82270109/12OmNButq3f", "parentPublication": { "id": "proceedings/icpads/1997/8227/0", "title": "Parallel and Distributed Systems, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ssst/1990/2038/0/00138131", "title": "Input impedance of a probe excited semi-infinite rectangular waveguide with a tuning post", "doi": null, "abstractUrl": "/proceedings-article/ssst/1990/00138131/12OmNqI04D5", "parentPublication": { "id": "proceedings/ssst/1990/2038/0", "title": "Proceedings The Twenty-Second Southeastern Symposium on System Theory", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cisis/2014/4325/0/4325a313", "title": "Resonator in Two Dimensional Photonic Crystal Structure with Square Lattice by Metallic Pillars", "doi": null, "abstractUrl": "/proceedings-article/cisis/2014/4325a313/12OmNrMHOmO", "parentPublication": { "id": "proceedings/cisis/2014/4325/0", "title": "2014 Eighth International Conference on Complex, Intelligent and Software Intensive Systems (CISIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ectc/2017/6315/0/07999914", "title": "Signal Integrity Modeling in Inhomogeneous Waveguide/PCB of Arbitrary Shape Using Broadband Green's Function", "doi": null, "abstractUrl": "/proceedings-article/ectc/2017/07999914/12OmNs59JDs", "parentPublication": { "id": "proceedings/ectc/2017/6315/0", "title": "2017 IEEE 67th Electronic Components and Technology Conference (ECTC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/apcase/2015/7588/0/7588a058", "title": "Development of Radial Waveguide Dividers with Large Number of Ports", "doi": null, "abstractUrl": "/proceedings-article/apcase/2015/7588a058/12OmNvy258J", "parentPublication": { "id": "proceedings/apcase/2015/7588/0", "title": "2015 Asia-Pacific Conference on Computer Aided System Engineering (APCASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bwcca/2014/4173/0/4173a357", "title": "Propagation Constant Measurement in Two Dimensional Post Array Waveguide with Triangular Lattice by Metallic Pillars", "doi": null, "abstractUrl": "/proceedings-article/bwcca/2014/4173a357/12OmNz2TCG9", "parentPublication": { "id": "proceedings/bwcca/2014/4173/0", "title": "2014 Ninth International Conference on Broadband and Wireless Computing, Communication and Applications (BWCCA)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/wcmeim/2021/2172/0/217200a159", "title": "Demonstration of Rectangular Waveguides by Microfabrication Technology", "doi": null, "abstractUrl": "/proceedings-article/wcmeim/2021/217200a159/1ANLuvUgbba", "parentPublication": { "id": "proceedings/wcmeim/2021/2172/0", "title": "2021 4th World Conference on Mechanical Engineering and Intelligent Manufacturing (WCMEIM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2022/9978/0/997800a471", "title": "Design and implementation of X-band waveguide slot antenna", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2022/997800a471/1Byev1Lk0Bq", "parentPublication": { "id": "proceedings/icmtma/2022/9978/0", "title": "2022 14th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wceea/2022/5952/0/595200a113", "title": "Studies on the wall currents distributions of elliptical waveguide", "doi": null, "abstractUrl": "/proceedings-article/wceea/2022/595200a113/1J7WECNZdL2", "parentPublication": { "id": "proceedings/wceea/2022/5952/0", "title": "2022 International Conference on Wireless Communications, Electrical Engineering and Automation (WCEEA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1lRlIYZhxG8", "title": "2019 4th International Conference on Communication and Information Systems (ICCIS)", "acronym": "iccis", "groupId": "1830624", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1lRlKMvW4fK", "doi": "10.1109/ICCIS49662.2019.00029", "title": "Analysis of Bioproteins by Propagation Signal Based on SAW Device", "normalizedTitle": "Analysis of Bioproteins by Propagation Signal Based on SAW Device", "abstract": "There are various bioproteins in our daily life. Humans can detect tens of thousands of bioproteins, such as odorant molecules. Nowadays, electronic noses are well-known as odor sensors that are widely used for many applications. Electrochemical methods, optical methods and acoustic techniques are used for odor sensors. Surface acoustic wave (SAW) devices, in which Rayleigh waves propagate, are studied for the odor sensors. Velocity of the SAW that is observed in the odor sensor depends on both a SAW device substrate and odorant mediums. This work is based on SAW device. In this work, we will describe the propagation characteristics of surface acoustic wave in viscoelastic model. we have to theoretically analysis the propagation velocity of surface acoustic wave propagating in SAW device. We determined a new mathematical formula for estimating the velocity of SAWs, which propagate in the SAW device. Also, we experimentally observed the propagation velocity of SAW device by different bioproteins. At the same concentration of protein, velocity of binding solution is depending on the density of odorant molecules. When the density increased, velocity of binding solution also increases.", "abstracts": [ { "abstractType": "Regular", "content": "There are various bioproteins in our daily life. Humans can detect tens of thousands of bioproteins, such as odorant molecules. 
Nowadays, electronic noses are well-known as odor sensors that are widely used for many applications. Electrochemical methods, optical methods and acoustic techniques are used for odor sensors. Surface acoustic wave (SAW) devices, in which Rayleigh waves propagate, are studied for the odor sensors. Velocity of the SAW that is observed in the odor sensor depends on both a SAW device substrate and odorant mediums. This work is based on SAW device. In this work, we will describe the propagation characteristics of surface acoustic wave in viscoelastic model. we have to theoretically analysis the propagation velocity of surface acoustic wave propagating in SAW device. We determined a new mathematical formula for estimating the velocity of SAWs, which propagate in the SAW device. Also, we experimentally observed the propagation velocity of SAW device by different bioproteins. At the same concentration of protein, velocity of binding solution is depending on the density of odorant molecules. When the density increased, velocity of binding solution also increases.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "There are various bioproteins in our daily life. Humans can detect tens of thousands of bioproteins, such as odorant molecules. Nowadays, electronic noses are well-known as odor sensors that are widely used for many applications. Electrochemical methods, optical methods and acoustic techniques are used for odor sensors. Surface acoustic wave (SAW) devices, in which Rayleigh waves propagate, are studied for the odor sensors. Velocity of the SAW that is observed in the odor sensor depends on both a SAW device substrate and odorant mediums. This work is based on SAW device. In this work, we will describe the propagation characteristics of surface acoustic wave in viscoelastic model. we have to theoretically analysis the propagation velocity of surface acoustic wave propagating in SAW device. 
We determined a new mathematical formula for estimating the velocity of SAWs, which propagate in the SAW device. Also, we experimentally observed the propagation velocity of SAW device by different bioproteins. At the same concentration of protein, velocity of binding solution is depending on the density of odorant molecules. When the density increased, velocity of binding solution also increases.", "fno": "09151864", "keywords": [ "Acoustic Wave Propagation", "Biological Techniques", "Electronic Noses", "Mathematical Analysis", "Proteins", "Surface Acoustic Wave Sensors", "Velocity Measurement", "Viscoelasticity", "Odorant Molecule Density", "Viscoelastic Model", "Bioprotein Propagation", "Signal Propagation", "Surface Acoustic Wave Propagation", "Odorant Mediums", "Rayleigh Wave Propagation", "Surface Acoustic Wave Devices", "Odor Sensor", "SAW Device", "Surface Acoustic Wave Devices", "Surface Acoustic Waves", "Liquids", "Sensors", "Optical Surface Waves", "Substrates", "SAW Surface Acoustic Wave Device", "Rayleigh Wave", "Bioproteins", "Viscoelastic Model", "Cross Correlation Coefficients" ], "authors": [ { "affiliation": "Inner Mongolia University for Nationalities,College of Computer Science and Technology,Tongliao,China", "fullName": "Yuxia Yang", "givenName": "Yuxia", "surname": "Yang", "__typename": "ArticleAuthorType" }, { "affiliation": "Inner Mongolia University for Nationalities,College of Computer Science and Technology,Tongliao,China", "fullName": "Jingqing Jiang", "givenName": "Jingqing", "surname": "Jiang", "__typename": "ArticleAuthorType" }, { "affiliation": "Inner Mongolia University for Nationalities,College of Computer Science and Technology,Tongliao,China", "fullName": "Tuya Tuya", "givenName": "Tuya", "surname": "Tuya", "__typename": "ArticleAuthorType" }, { "affiliation": "Inner Mongolia University for Nationalities,College of Computer Science and Technology,Tongliao,China", "fullName": "Chaoluomeng Chaoluomeng", "givenName": "Chaoluomeng", 
"surname": "Chaoluomeng", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccis", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-12-01T00:00:00", "pubType": "proceedings", "pages": "124-127", "year": "2019", "issn": null, "isbn": "978-1-7281-6297-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09151689", "articleId": "1lRlLsgFMw8", "__typename": "AdjacentArticleType" }, "next": { "fno": "09151472", "articleId": "1lRlJCxLNvy", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/greencom-ithingscpscom/2013/5046/0/06682326", "title": "The SAW Gas Chromatograph and Its Applications in the Public Security", "doi": null, "abstractUrl": "/proceedings-article/greencom-ithingscpscom/2013/06682326/12OmNAIMO7F", "parentPublication": { "id": "proceedings/greencom-ithingscpscom/2013/5046/0", "title": "2013 IEEE International Conference on Green Computing and Communications (GreenCom) and IEEE Internet of Things(iThings) and IEEE Cyber, Physical and Social Computing(CPSCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccce/2016/2427/0/2427a516", "title": "Investigation of Acoustic Wavelength Effects on Silicon Compatible Al Doped ZnO SAW Resonator", "doi": null, "abstractUrl": "/proceedings-article/iccce/2016/2427a516/12OmNBC8AAB", "parentPublication": { "id": "proceedings/iccce/2016/2427/0", "title": "2016 International Conference on Computer and Communication Engineering (ICCCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ssst/1990/2038/0/00138173", "title": "CAD tools for the design and analysis of surface acoustic wave chirp devices", "doi": null, "abstractUrl": "/proceedings-article/ssst/1990/00138173/12OmNCcbE7B", "parentPublication": { "id": 
"proceedings/ssst/1990/2038/0", "title": "Proceedings The Twenty-Second Southeastern Symposium on System Theory", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/is3c/2016/3071/0/3071a618", "title": "Fabrication of SAW Devices with Dual Mode Frequency Response Using AlN and ZnO Thin Films", "doi": null, "abstractUrl": "/proceedings-article/is3c/2016/3071a618/12OmNrHjqKh", "parentPublication": { "id": "proceedings/is3c/2016/3071/0", "title": "2016 International Symposium on Computer, Consumer and Control (IS3C)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2013/4795/0/06549409", "title": "Visual-olfactory presentation system using a miniaturized olfactory display based on SAW streaming and electroosmotic pumps", "doi": null, "abstractUrl": "/proceedings-article/vr/2013/06549409/12OmNwDSduJ", "parentPublication": { "id": "proceedings/vr/2013/4795/0", "title": "2013 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504712", "title": "Olfactory display using surface acoustic wave device and micropumps for wearable applications", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504712/12OmNzgwmQK", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/is3c/2018/7036/0/703600a392", "title": "Study of In Vitro Diagnostic Preconcentration Technique via Surface Acoustic Wave Modified with the 54 MHz Communication Chip", "doi": null, "abstractUrl": "/proceedings-article/is3c/2018/703600a392/17QjJfqxwoU", "parentPublication": { "id": "proceedings/is3c/2018/7036/0", "title": "2018 International Symposium on Computer, Consumer and Control (IS3C)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/ccpqt/2022/7020/0/702000a328", "title": "Research on Detection Technology of Bio-electronic Nose Based on SAW Devices", "doi": null, "abstractUrl": "/proceedings-article/ccpqt/2022/702000a328/1Iiu25VEU80", "parentPublication": { "id": "proceedings/ccpqt/2022/7020/0", "title": "2022 International Conference on Computing, Communication, Perception and Quantum Technology (CCPQT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icctec/2017/5784/0/578400a890", "title": "Design and Application of High Precision Differential SAW Sensor", "doi": null, "abstractUrl": "/proceedings-article/icctec/2017/578400a890/1cksaN9WhDG", "parentPublication": { "id": "proceedings/icctec/2017/5784/0", "title": "2017 International Conference on Computer Technology, Electronics and Communication (ICCTEC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089617", "title": "Virtual environment with smell using wearable olfactory display and computational fluid dynamics simulation", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089617/1jIxfcDz7Ak", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1tuAeQeDJja", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1tuBp2DGY1O", "doi": "10.1109/VR50410.2021.00111", "title": "Learning Acoustic Scattering Fields for Dynamic Interactive Sound Propagation", "normalizedTitle": "Learning Acoustic Scattering Fields for Dynamic Interactive Sound Propagation", "abstract": "We present a novel hybrid sound propagation algorithm for interactive applications. Our approach is designed for dynamic scenes and uses a neural network-based learned scattered field representation along with ray tracing to generate specular, diffuse, diffraction, and occlusion effects efficiently. We use geometric deep learning to approximate the acoustic scattering field using spherical harmonics. We use a large 3D dataset for training, and compare its accuracy with the ground truth generated using an accurate wave-based solver. The additional overhead of computing the learned scattered field at runtime is small and we demonstrate its interactive performance by generating plausible sound effects in dynamic scenes with diffraction and occlusion effects. We demonstrate the perceptual benefits of our approach based on an audio-visual user study.", "abstracts": [ { "abstractType": "Regular", "content": "We present a novel hybrid sound propagation algorithm for interactive applications. Our approach is designed for dynamic scenes and uses a neural network-based learned scattered field representation along with ray tracing to generate specular, diffuse, diffraction, and occlusion effects efficiently. We use geometric deep learning to approximate the acoustic scattering field using spherical harmonics. We use a large 3D dataset for training, and compare its accuracy with the ground truth generated using an accurate wave-based solver. 
The additional overhead of computing the learned scattered field at runtime is small and we demonstrate its interactive performance by generating plausible sound effects in dynamic scenes with diffraction and occlusion effects. We demonstrate the perceptual benefits of our approach based on an audio-visual user study.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a novel hybrid sound propagation algorithm for interactive applications. Our approach is designed for dynamic scenes and uses a neural network-based learned scattered field representation along with ray tracing to generate specular, diffuse, diffraction, and occlusion effects efficiently. We use geometric deep learning to approximate the acoustic scattering field using spherical harmonics. We use a large 3D dataset for training, and compare its accuracy with the ground truth generated using an accurate wave-based solver. The additional overhead of computing the learned scattered field at runtime is small and we demonstrate its interactive performance by generating plausible sound effects in dynamic scenes with diffraction and occlusion effects. 
We demonstrate the perceptual benefits of our approach based on an audio-visual user study.", "fno": "255600a835", "keywords": [ "Acoustic Wave Diffraction", "Acoustic Wave Propagation", "Acoustic Wave Scattering", "Learning Artificial Intelligence", "Neural Nets", "Ray Tracing", "Virtual Reality", "Geometric Deep Learning", "Acoustic Scattering Field", "Accurate Wave Based", "Interactive Performance", "Plausible Sound Effects", "Dynamic Scenes", "Occlusion Effects", "Dynamic Interactive Sound Propagation", "Novel Hybrid Sound Propagation Algorithm", "Interactive Applications", "Neural Network Based Learned Scattered Field Representation", "Specular Diffraction", "Diffuse Diffraction", "Training", "Three Dimensional Displays", "Runtime", "Diffraction", "Heuristic Algorithms", "Acoustic Scattering", "Virtual Reality", "Computing Methodologies Computer Graphics Graphics Systems And Interfaces Virtual Reality" ], "authors": [ { "affiliation": "University of Maryland at College Park", "fullName": "Zhenyu Tang", "givenName": "Zhenyu", "surname": "Tang", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Maryland at College Park", "fullName": "Hsien-Yu Meng", "givenName": "Hsien-Yu", "surname": "Meng", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Maryland at College Park", "fullName": "Dinesh Manocha", "givenName": "Dinesh", "surname": "Manocha", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-03-01T00:00:00", "pubType": "proceedings", "pages": "835-844", "year": "2021", "issn": null, "isbn": "978-1-6654-1838-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1tuBokOEyYg", "name": "pvr202118380-09417734s1-mm_255600a835.zip", "size": "134 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pvr202118380-09417734s1-mm_255600a835.zip", "__typename": "WebExtraType" } ], 
"adjacentArticles": { "previous": { "fno": "255600a826", "articleId": "1tuBbGEUWm4", "__typename": "AdjacentArticleType" }, "next": { "fno": "255600a845", "articleId": "1tuAwEv4Tug", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/sive/2014/5781/0/07006289", "title": "Wave-based sound propagation for VR applications", "doi": null, "abstractUrl": "/proceedings-article/sive/2014/07006289/12OmNAXglTR", "parentPublication": { "id": "proceedings/sive/2014/5781/0", "title": "2014 IEEE VR Workshop: Sonic Interaction in Virtual Environments (SIVE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/paap/2014/3845/0/3845a183", "title": "Sound Source Localization Algorithm Based on a Helmet-Mounted Microphone Array", "doi": null, "abstractUrl": "/proceedings-article/paap/2014/3845a183/12OmNBNM8Vk", "parentPublication": { "id": "proceedings/paap/2014/3845/0", "title": "2014 Sixth International Symposium on Parallel Architectures, Algorithms and Programming (PAAP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/shpcc/1994/5680/0/00296699", "title": "ADEAS: a distributed environment for acoustic simulation", "doi": null, "abstractUrl": "/proceedings-article/shpcc/1994/00296699/12OmNviHK6y", "parentPublication": { "id": "proceedings/shpcc/1994/5680/0", "title": "Proceedings of IEEE Scalable High Performance Computing Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/waina/2012/4652/0/4652a633", "title": "On Accuracy of Discrete Ray Tracing Method in Comparison with Rigorous Solutions", "doi": null, "abstractUrl": "/proceedings-article/waina/2012/4652a633/12OmNxbEtJX", "parentPublication": { "id": "proceedings/waina/2012/4652/0", "title": "2012 26th International Conference on Advanced Information Networking and Applications Workshops", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccis/2010/4270/0/4270a794", "title": "Influences on Spatial Directivity of Acoustic Vector Sensor by Soft Spherical Boundary", "doi": null, "abstractUrl": "/proceedings-article/iccis/2010/4270a794/12OmNyQYtq8", "parentPublication": { "id": "proceedings/iccis/2010/4270/0", "title": "2010 International Conference on Computational and Information Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2016/1552/0/07574738", "title": "Visibility preprocessing suitable for virtual reality sound propagation with a moving receiver and multiple sources", "doi": null, "abstractUrl": "/proceedings-article/icmew/2016/07574738/12OmNzBOhLp", "parentPublication": { "id": "proceedings/icmew/2016/1552/0", "title": "2016 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ssst/1993/3560/0/00522737", "title": "Acceleration of the modal series in the Neumann scattering problem for a hemispherical shell", "doi": null, "abstractUrl": "/proceedings-article/ssst/1993/00522737/12OmNzZEApj", "parentPublication": { "id": "proceedings/ssst/1993/3560/0", "title": "1993 (25th) Southeastern Symposium on System Theory", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/04/08307458", "title": "Diffraction Kernels for Interactive Sound Propagation in Dynamic Environments", "doi": null, "abstractUrl": "/journal/tg/2018/04/08307458/13rRUwh80Hk", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/04/07014276", "title": "WAVE: Interactive Wave-based Sound Propagation for Virtual Environments", "doi": null, 
"abstractUrl": "/journal/tg/2015/04/07014276/13rRUygT7yf", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/04/ttg201404495", "title": "Source and Listener Directivity for Interactive Wave-Based Sound Propagation", "doi": null, "abstractUrl": "/journal/tg/2014/04/ttg201404495/13rRUyogGAb", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNx19jVJ", "title": "15th IEEE International Conference on Program Comprehension (ICPC '07)", "acronym": "icpc", "groupId": "1003168", "volume": "0", "displayVolume": "0", "year": "2007", "__typename": "ProceedingType" }, "article": { "id": "12OmNviHKjL", "doi": "10.1109/ICPC.2007.1", "title": "15 Years of Program Comprehension", "normalizedTitle": "15 Years of Program Comprehension", "abstract": "2007 marks the 15th anniversary of the IEEE Program Comprehension series of events. What began in 1992 as a small workshop co-located with the IEEE Conference on Software Maintenance (CSM?92) in Florida is now the preeminent international gathering for researchers exploring all aspects of program understanding. This success speaks to the long-term importance and the growing interest of the field of program comprehension in the broader software engineering context.", "abstracts": [ { "abstractType": "Regular", "content": "2007 marks the 15th anniversary of the IEEE Program Comprehension series of events. What began in 1992 as a small workshop co-located with the IEEE Conference on Software Maintenance (CSM?92) in Florida is now the preeminent international gathering for researchers exploring all aspects of program understanding. This success speaks to the long-term importance and the growing interest of the field of program comprehension in the broader software engineering context.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "2007 marks the 15th anniversary of the IEEE Program Comprehension series of events. What began in 1992 as a small workshop co-located with the IEEE Conference on Software Maintenance (CSM?92) in Florida is now the preeminent international gathering for researchers exploring all aspects of program understanding. 
This success speaks to the long-term importance and the growing interest of the field of program comprehension in the broader software engineering context.", "fno": "28600279", "keywords": [], "authors": [ { "affiliation": "Florida Institute of Technology", "fullName": "Scott Tilley", "givenName": "Scott", "surname": "Tilley", "__typename": "ArticleAuthorType" } ], "idPrefix": "icpc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2007-06-01T00:00:00", "pubType": "proceedings", "pages": "279-280", "year": "2007", "issn": "1063-6897", "isbn": "0-7695-2860-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "28600271", "articleId": "12OmNvA1h84", "__typename": "AdjacentArticleType" }, "next": { "fno": "28600281", "articleId": "12OmNzayNpA", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/wcre/2008/3429/0/3429a145", "title": "Integrative Levels of Program Comprehension", "doi": null, "abstractUrl": "/proceedings-article/wcre/2008/3429a145/12OmNASraNE", "parentPublication": { "id": "proceedings/wcre/2008/3429/0", "title": "2008 15th Working Conference on Reverse Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccsee/2012/4647/1/4647a601", "title": "Overview of Program Comprehension", "doi": null, "abstractUrl": "/proceedings-article/iccsee/2012/4647a601/12OmNBziBc8", "parentPublication": { "id": "proceedings/iccsee/2012/4647/2", "title": "Computer Science and Electronics Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sera/2005/2297/0/22970392", "title": "A Cognitive Model for Program Comprehension", "doi": null, "abstractUrl": "/proceedings-article/sera/2005/22970392/12OmNwwd30w", "parentPublication": { 
"id": "proceedings/sera/2005/2297/0", "title": "Proceedings. Third ACIS International Conference on Software Engineering Research, Management and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/saner/2016/1855/5/07476769", "title": "Program Comprehension: Past, Present, and Future", "doi": null, "abstractUrl": "/proceedings-article/saner/2016/07476769/12OmNx9WSXe", "parentPublication": { "id": "saner/2016/1855/5", "title": "2016 IEEE 23rd International Conference on Software Analysis, Evolution, and Reengineering (SANER)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpc/2017/0535/0/0535a308", "title": "Comprehending Studies on Program Comprehension", "doi": null, "abstractUrl": "/proceedings-article/icpc/2017/0535a308/12OmNzmLxOj", "parentPublication": { "id": "proceedings/icpc/2017/0535/0", "title": "2017 IEEE/ACM 25th International Conference on Program Comprehension (ICPC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/compsac/2002/1727/0/17270427", "title": "From System Comprehension to Program Comprehension", "doi": null, "abstractUrl": "/proceedings-article/compsac/2002/17270427/12OmNznCkYo", "parentPublication": { "id": "proceedings/compsac/2002/1727/0", "title": "Proceedings 26th Annual International Computer Software and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpc/2008/3176/0/3176a283", "title": "Industrial Realities of Program Comprehension (IRPC 2008)", "doi": null, "abstractUrl": "/proceedings-article/icpc/2008/3176a283/12OmNznkK2S", "parentPublication": { "id": "proceedings/icpc/2008/3176/0", "title": "International Conference on Program Comprehension", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icse-companion/2018/5663/0/566301a496", "title": "A 
Neuro-Cognitive Perspective of Program Comprehension", "doi": null, "abstractUrl": "/proceedings-article/icse-companion/2018/566301a496/13bd1eW2l97", "parentPublication": { "id": "proceedings/icse-companion/2018/5663/0", "title": "2018 IEEE/ACM 40th International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icse/2018/5638/0/563801a584", "title": "Measuring Program Comprehension: A Large-Scale Field Study with Professionals", "doi": null, "abstractUrl": "/proceedings-article/icse/2018/563801a584/13l5NWM1Hsv", "parentPublication": { "id": "proceedings/icse/2018/5638/0", "title": "2018 IEEE/ACM 40th International Conference on Software Engineering (ICSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpc/2021/1403/0/140300a455", "title": "Is Algorithm Comprehension Different from Program Comprehension?", "doi": null, "abstractUrl": "/proceedings-article/icpc/2021/140300a455/1tB7wCcSiEU", "parentPublication": { "id": "proceedings/icpc/2021/1403/0/", "title": "2021 IEEE/ACM 29th International Conference on Program Comprehension (ICPC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1E2wzmJytAk", "title": "2022 IEEE Conference on Software Testing, Verification and Validation (ICST)", "acronym": "icst", "groupId": "1001832", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1E2wEY3LjqM", "doi": "10.1109/ICST53961.2022.00002", "title": "2022 IEEE 15th International Conference on Software Testing, Verification and Validation", "normalizedTitle": "2022 IEEE 15th International Conference on Software Testing, Verification and Validation", "abstract": "2022 IEEE 15th International Conference on Software Testing, Verification and Validation", "abstracts": [ { "abstractType": "Regular", "content": "2022 IEEE 15th International Conference on Software Testing, Verification and Validation", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "2022 IEEE 15th International Conference on Software Testing, Verification and Validation", "fno": "667900z003", "keywords": [], "authors": [], "idPrefix": "icst", "isOpenAccess": true, "showRecommendedArticles": false, "showBuyMe": false, "hasPdf": true, "pubDate": "2022-04-01T00:00:00", "pubType": "proceedings", "pages": "1-1", "year": "2022", "issn": "2159-4848", "isbn": "978-1-6654-6679-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "667900z001", "articleId": "1E2wIehfTCo", "__typename": "AdjacentArticleType" }, "next": { "fno": "667900z004", "articleId": "1E2wGQnTsk0", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwbcJ4J", "title": "Proceedings Sixth Australian Conference on Computer-Human Interaction", "acronym": "ozchi", "groupId": "1000154", "volume": "0", "displayVolume": "0", "year": "1996", "__typename": "ProceedingType" }, "article": { "id": "12OmNrJRPhS", "doi": "10.1109/OZCHI.1996.560008", "title": "Generating Direct Manipulation Program Editors", "normalizedTitle": "Generating Direct Manipulation Program Editors", "abstract": "Abstract: Language specific editors are cognisant of the syntax and semantics of the programming language they manipulate. Despite the various potential advantages of language specific editors, they have not been widely accepted by software developers for serious software development. On the other hand direct manipulation editors, which are also cognisant of the entities they manipulate, have proven to be successful in other domains such as drawing and VLSI design tools. Thus, it is worth while investigating the incorporation of direct manipulation mechanisms into program editors. This paper presents a technique for specifying direct manipulation editing of programs which is amenable to the generation of language specific editors incorporating direct manipulation from a specification of the desired editing mechanisms.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract: Language specific editors are cognisant of the syntax and semantics of the programming language they manipulate. Despite the various potential advantages of language specific editors, they have not been widely accepted by software developers for serious software development. On the other hand direct manipulation editors, which are also cognisant of the entities they manipulate, have proven to be successful in other domains such as drawing and VLSI design tools. Thus, it is worth while investigating the incorporation of direct manipulation mechanisms into program editors. 
This paper presents a technique for specifying direct manipulation editing of programs which is amenable to the generation of language specific editors incorporating direct manipulation from a specification of the desired editing mechanisms.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract: Language specific editors are cognisant of the syntax and semantics of the programming language they manipulate. Despite the various potential advantages of language specific editors, they have not been widely accepted by software developers for serious software development. On the other hand direct manipulation editors, which are also cognisant of the entities they manipulate, have proven to be successful in other domains such as drawing and VLSI design tools. Thus, it is worth while investigating the incorporation of direct manipulation mechanisms into program editors. This paper presents a technique for specifying direct manipulation editing of programs which is amenable to the generation of language specific editors incorporating direct manipulation from a specification of the desired editing mechanisms.", "fno": "75250177", "keywords": [ "Text Editing Direct Manipulation Program Editors Language Specific Editors Software Development Direct Manipulation Editors Direct Manipulation Mechanisms Editing Mechanisms" ], "authors": [ { "affiliation": "Flinders Univ. of South Australia, Adelaide, SA, Australia", "fullName": "M. Read", "givenName": "M.", "surname": "Read", "__typename": "ArticleAuthorType" }, { "affiliation": "Flinders Univ. of South Australia, Adelaide, SA, Australia", "fullName": "C. 
Marlin", "givenName": "C.", "surname": "Marlin", "__typename": "ArticleAuthorType" } ], "idPrefix": "ozchi", "isOpenAccess": false, "showRecommendedArticles": false, "showBuyMe": true, "hasPdf": true, "pubDate": "1996-11-01T00:00:00", "pubType": "proceedings", "pages": "0177", "year": "1996", "issn": null, "isbn": "0-8186-7525-X", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "75250169", "articleId": "12OmNvAAtwp", "__typename": "AdjacentArticleType" }, "next": { "fno": "75250184", "articleId": "12OmNyrqzmm", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwbcJ4J", "title": "Proceedings Sixth Australian Conference on Computer-Human Interaction", "acronym": "ozchi", "groupId": "1000154", "volume": "0", "displayVolume": "0", "year": "1996", "__typename": "ProceedingType" }, "article": { "id": "12OmNy4IF6D", "doi": "10.1109/OZCHI.1996.560162", "title": "Specifying Direct Manipulation within Program Editors", "normalizedTitle": "Specifying Direct Manipulation within Program Editors", "abstract": "Language specific editors, since they are cognizant of the syntax and semantics of the programming language they manipulate, should provide a number of advantages for software developers. However, they have not been widely accepted by software developers for serious software development. In other domains, such as drawing and VLSI design, direct manipulation editors (which are also cognizant of the entities they manipulate) have proven to be successful. Consequently, the incorporation of direct manipulation into program editors is worthy of investigation. Furthermore, the generation of editors from descriptions of the language to be handled and the editing mechanisms to be used has a number of advantages. Thus, this paper outlines a technique for specifying direct manipulation editing of programs which is amenable to the generation of language specific editors from a specification of the desired editing mechanisms.", "abstracts": [ { "abstractType": "Regular", "content": "Language specific editors, since they are cognizant of the syntax and semantics of the programming language they manipulate, should provide a number of advantages for software developers. However, they have not been widely accepted by software developers for serious software development. In other domains, such as drawing and VLSI design, direct manipulation editors (which are also cognizant of the entities they manipulate) have proven to be successful. 
Consequently, the incorporation of direct manipulation into program editors is worthy of investigation. Furthermore, the generation of editors from descriptions of the language to be handled and the editing mechanisms to be used has a number of advantages. Thus, this paper outlines a technique for specifying direct manipulation editing of programs which is amenable to the generation of language specific editors from a specification of the desired editing mechanisms.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Language specific editors, since they are cognizant of the syntax and semantics of the programming language they manipulate, should provide a number of advantages for software developers. However, they have not been widely accepted by software developers for serious software development. In other domains, such as drawing and VLSI design, direct manipulation editors (which are also cognizant of the entities they manipulate) have proven to be successful. Consequently, the incorporation of direct manipulation into program editors is worthy of investigation. Furthermore, the generation of editors from descriptions of the language to be handled and the editing mechanisms to be used has a number of advantages. 
Thus, this paper outlines a technique for specifying direct manipulation editing of programs which is amenable to the generation of language specific editors from a specification of the desired editing mechanisms.", "fno": "75250346", "keywords": [ "Language Specific Editors", "Direct Manipulation", "Program Editing", "Generation Of Language Specific Editors", "State Machines" ], "authors": [ { "affiliation": "The Flinders University of South Australia", "fullName": "Michael Read", "givenName": "Michael", "surname": "Read", "__typename": "ArticleAuthorType" } ], "idPrefix": "ozchi", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "1996-11-01T00:00:00", "pubType": "proceedings", "pages": "0346", "year": "1996", "issn": null, "isbn": "0-8186-7525-X", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "75250344", "articleId": "12OmNyywxEZ", "__typename": "AdjacentArticleType" }, "next": { "fno": "75250348", "articleId": "12OmNAObbBf", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/hicss/2003/1874/1/187410040c", "title": "Using Familiar Single-User Editors for Collaborative Editing", "doi": null, "abstractUrl": "/proceedings-article/hicss/2003/187410040c/12OmNAXxXjP", "parentPublication": { "id": "proceedings/hicss/2003/1874/1", "title": "36th Annual Hawaii International Conference on System Sciences, 2003. 
Proceedings of the", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vl/1995/7045/0/70450203", "title": "DiaGen: a generator for diagram editors providing direct manipulation and execution of diagrams", "doi": null, "abstractUrl": "/proceedings-article/vl/1995/70450203/12OmNqFJhTJ", "parentPublication": { "id": "proceedings/vl/1995/7045/0", "title": "Visual Languages, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ozchi/1996/7525/0/75250177", "title": "Generating Direct Manipulation Program Editors", "doi": null, "abstractUrl": "/proceedings-article/ozchi/1996/75250177/12OmNrJRPhS", "parentPublication": { "id": "proceedings/ozchi/1996/7525/0", "title": "Proceedings Sixth Australian Conference on Computer-Human Interaction", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2003/1988/0/19880524", "title": "An Immersive Path Editor for Keyframe Animation using Hand Direct Manipulation and 3D Gearbox Widgets", "doi": null, "abstractUrl": "/proceedings-article/iv/2003/19880524/12OmNrkT7BD", "parentPublication": { "id": "proceedings/iv/2003/1988/0", "title": "Proceedings on Seventh International Conference on Information Visualization, 2003. 
IV 2003.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wise/2000/0577/1/05771401", "title": "Conflict Control Locking in Distributed Cooperative Graphics Editors", "doi": null, "abstractUrl": "/proceedings-article/wise/2000/05771401/12OmNx7G5XN", "parentPublication": { "id": "proceedings/wise/2000/0577/1", "title": "Web Information Systems Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/elml/2009/3528/0/3528a090", "title": "CollabEd: A Platform for Collaboratizing Existing Editors", "doi": null, "abstractUrl": "/proceedings-article/elml/2009/3528a090/12OmNzC5SR2", "parentPublication": { "id": "proceedings/elml/2009/3528/0", "title": "Mobile, Hybrid, and On-line Learning, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/tools/1997/8485/0/84850099", "title": "A Component Framework for Direct-Manipulation Editors", "doi": null, "abstractUrl": "/proceedings-article/tools/1997/84850099/12OmNzUxO4N", "parentPublication": { "id": "proceedings/tools/1997/8485/0", "title": "Proceedings. Technology of Object-Oriented Languages and Systems, TOOLS 25 (Cat. 
No.97TB100239)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2010/04/mcg2010040042", "title": "Direct Manipulation Blendshapes", "doi": null, "abstractUrl": "/magazine/cg/2010/04/mcg2010040042/13rRUB6Sq4P", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ts/1988/08/e1098", "title": "A System for Generating Language-Oriented Editors", "doi": null, "abstractUrl": "/journal/ts/1988/08/e1098/13rRUwj7cqy", "parentPublication": { "id": "trans/ts", "title": "IEEE Transactions on Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ts/1993/09/e0935", "title": "Incremental LL(1) Parsing in Language-Based Editors", "doi": null, "abstractUrl": "/journal/ts/1993/09/e0935/13rRUwjGoHC", "parentPublication": { "id": "trans/ts", "title": "IEEE Transactions on Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwnH4Qq", "title": "2011 IEEE 11th International Conference on Data Mining Workshops", "acronym": "icdmw", "groupId": "1001620", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNrJAdRk", "doi": "10.1109/ICDMW.2011.78", "title": "Evaluating Hurricane Intensity Prediction Techniques in Real Time", "normalizedTitle": "Evaluating Hurricane Intensity Prediction Techniques in Real Time", "abstract": "While the accuracy of hurricane track prediction has been improving, predicting intensity, the maximum sustained wind speed, is still a very difficult challenge. This is problematic because the destructive power of a hurricane is directly related to its intensity. In this paper, we present Prediction Intensity Interval model for Hurricanes (PIIH) which combines sophisticated data mining techniques to create an online real time model for accurate intensity predictions and we present a web-based framework to dynamically compare PIIH to operational models used by the National Hurricane Center (NHC). The created dynamic website tracks, compares, and provides visualization to facilitate immediate comparisons of prediction techniques. This paper is a work in progress paper reporting on both, new features of the PIIH model and online visualization of the accuracy of that model as compared to other techniques.", "abstracts": [ { "abstractType": "Regular", "content": "While the accuracy of hurricane track prediction has been improving, predicting intensity, the maximum sustained wind speed, is still a very difficult challenge. This is problematic because the destructive power of a hurricane is directly related to its intensity. 
In this paper, we present Prediction Intensity Interval model for Hurricanes (PIIH) which combines sophisticated data mining techniques to create an online real time model for accurate intensity predictions and we present a web-based framework to dynamically compare PIIH to operational models used by the National Hurricane Center (NHC). The created dynamic website tracks, compares, and provides visualization to facilitate immediate comparisons of prediction techniques. This paper is a work in progress paper reporting on both, new features of the PIIH model and online visualization of the accuracy of that model as compared to other techniques.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "While the accuracy of hurricane track prediction has been improving, predicting intensity, the maximum sustained wind speed, is still a very difficult challenge. This is problematic because the destructive power of a hurricane is directly related to its intensity. In this paper, we present Prediction Intensity Interval model for Hurricanes (PIIH) which combines sophisticated data mining techniques to create an online real time model for accurate intensity predictions and we present a web-based framework to dynamically compare PIIH to operational models used by the National Hurricane Center (NHC). The created dynamic website tracks, compares, and provides visualization to facilitate immediate comparisons of prediction techniques. This paper is a work in progress paper reporting on both, new features of the PIIH model and online visualization of the accuracy of that model as compared to other techniques.", "fno": "4409a023", "keywords": [ "Hurricane", "Intensity Prediction", "Prediction Interval", "Markov Chain" ], "authors": [ { "affiliation": null, "fullName": "Vladimir Jovanovic", "givenName": "Vladimir", "surname": "Jovanovic", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Margaret H. 
Dunham", "givenName": "Margaret H.", "surname": "Dunham", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Michael Hahsler", "givenName": "Michael", "surname": "Hahsler", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yu Su", "givenName": "Yu", "surname": "Su", "__typename": "ArticleAuthorType" } ], "idPrefix": "icdmw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-12-01T00:00:00", "pubType": "proceedings", "pages": "23-29", "year": "2011", "issn": null, "isbn": "978-0-7695-4409-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4409a015", "articleId": "12OmNwe2IsN", "__typename": "AdjacentArticleType" }, "next": { "fno": "4409a030", "articleId": "12OmNzXFow0", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccis/2012/4789/0/4789b384", "title": "The Prediction Methods of the Water Influx Intensity of the Non-homogeneous Aquifer Gas Reservoir", "doi": null, "abstractUrl": "/proceedings-article/iccis/2012/4789b384/12OmNvSKNZR", "parentPublication": { "id": "proceedings/iccis/2012/4789/0", "title": "2012 Fourth International Conference on Computational and Information Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itng/2011/4367/0/4367a072", "title": "Interactive Visualization and Analysis of Hurricane Data", "doi": null, "abstractUrl": "/proceedings-article/itng/2011/4367a072/12OmNwtWfIj", "parentPublication": { "id": "proceedings/itng/2011/4367/0", "title": "Information Technology: New Generations, Third International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aici/2009/3816/4/3816d491", "title": "Improved on Maximum Intensity Projection", "doi": null, "abstractUrl": 
"/proceedings-article/aici/2009/3816d491/12OmNy6ZrZ3", "parentPublication": { "id": "proceedings/aici/2009/3816/4", "title": "2009 International Conference on Artificial Intelligence and Computational Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdmw/2010/4257/0/4257a098", "title": "A New Data Mining Model for Hurricane Intensity Prediction", "doi": null, "abstractUrl": "/proceedings-article/icdmw/2010/4257a098/12OmNyxXlwb", "parentPublication": { "id": "proceedings/icdmw/2010/4257/0", "title": "2010 IEEE International Conference on Data Mining Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2009/05/ttg2009050709", "title": "Case Study on Visualizing Hurricanes Using Illustration-Inspired Techniques", "doi": null, "abstractUrl": "/journal/tg/2009/05/ttg2009050709/13rRUwI5UfZ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2011/01/mcs2011010009", "title": "The Future of Hurricane Prediction", "doi": null, "abstractUrl": "/magazine/cs/2011/01/mcs2011010009/13rRUwInv8o", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2011/01/mcs2011010022", "title": "High-Resolution Hurricane Forecasts", "doi": null, "abstractUrl": "/magazine/cs/2011/01/mcs2011010022/13rRUxYrbPX", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icaice/2021/2186/0/218600a298", "title": "Hurricane Damage Prediction based on Convolutional Neural Network Models", "doi": null, "abstractUrl": "/proceedings-article/icaice/2021/218600a298/1Et4EMI5ssM", 
"parentPublication": { "id": "proceedings/icaice/2021/2186/0", "title": "2021 2nd International Conference on Artificial Intelligence and Computer Engineering (ICAICE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icaice/2021/2186/0/218600a386", "title": "Hurricane Damage Prediction on Satellite Imagery based on Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/icaice/2021/218600a386/1Et4rPXlduw", "parentPublication": { "id": "proceedings/icaice/2021/2186/0", "title": "2021 2nd International Conference on Artificial Intelligence and Computer Engineering (ICAICE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNCmpcNk", "title": "Visualization Conference, IEEE", "acronym": "ieee-vis", "groupId": "1000796", "volume": "0", "displayVolume": "0", "year": "2005", "__typename": "ProceedingType" }, "article": { "id": "12OmNscxjaI", "doi": "10.1109/VISUAL.2005.1532858", "title": "Illustration and photography inspired visualization of flows and volumes", "normalizedTitle": "Illustration and photography inspired visualization of flows and volumes", "abstract": "Understanding and analyzing complex volumetrically varying data is a difficult problem. Many computational visualization techniques have had only limited success in succinctly portraying the structure of three-dimensional turbulent flow. Motivated by both the extensive history and success of illustration and photographic flow visualization techniques, we have developed a new interactive volume rendering and visualization system for flows and volumes that simulates and enhances traditional illustration, experimental advection, and photographic flow visualization techniques. Our system uses a combination of varying focal and contextual illustrative styles, new advanced two-dimensional transfer functions, enhanced Schlieren and shadowgraphy shaders, and novel oriented structure enhancement techniques to allow interactive visualization, exploration, and comparative analysis of scalar, vector, and time-varying volume datasets. Both traditional illustration techniques and photographic flow visualization techniques effectively reduce visual clutter by using compact oriented structure information to convey three-dimensional structures. Therefore, a key to the effectiveness of our system is using one-dimensional (Schlieren and shadowgraphy) and two-dimensional (silhouette) oriented structural information to reduce visual clutter, while still providing enough three-dimensional structural information for the user's visual system to understand complex three-dimensional flow data. 
By combining these oriented feature visualization techniques with flexible transfer function controls, we can visualize scalar and vector data, allow comparative visualization of flow properties in a succinct, informative manner, and provide continuity for visualizing time-varying datasets.", "abstracts": [ { "abstractType": "Regular", "content": "Understanding and analyzing complex volumetrically varying data is a difficult problem. Many computational visualization techniques have had only limited success in succinctly portraying the structure of three-dimensional turbulent flow. Motivated by both the extensive history and success of illustration and photographic flow visualization techniques, we have developed a new interactive volume rendering and visualization system for flows and volumes that simulates and enhances traditional illustration, experimental advection, and photographic flow visualization techniques. Our system uses a combination of varying focal and contextual illustrative styles, new advanced two-dimensional transfer functions, enhanced Schlieren and shadowgraphy shaders, and novel oriented structure enhancement techniques to allow interactive visualization, exploration, and comparative analysis of scalar, vector, and time-varying volume datasets. Both traditional illustration techniques and photographic flow visualization techniques effectively reduce visual clutter by using compact oriented structure information to convey three-dimensional structures. Therefore, a key to the effectiveness of our system is using one-dimensional (Schlieren and shadowgraphy) and two-dimensional (silhouette) oriented structural information to reduce visual clutter, while still providing enough three-dimensional structural information for the user's visual system to understand complex three-dimensional flow data. 
By combining these oriented feature visualization techniques with flexible transfer function controls, we can visualize scalar and vector data, allow comparative visualization of flow properties in a succinct, informative manner, and provide continuity for visualizing time-varying datasets.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Understanding and analyzing complex volumetrically varying data is a difficult problem. Many computational visualization techniques have had only limited success in succinctly portraying the structure of three-dimensional turbulent flow. Motivated by both the extensive history and success of illustration and photographic flow visualization techniques, we have developed a new interactive volume rendering and visualization system for flows and volumes that simulates and enhances traditional illustration, experimental advection, and photographic flow visualization techniques. Our system uses a combination of varying focal and contextual illustrative styles, new advanced two-dimensional transfer functions, enhanced Schlieren and shadowgraphy shaders, and novel oriented structure enhancement techniques to allow interactive visualization, exploration, and comparative analysis of scalar, vector, and time-varying volume datasets. Both traditional illustration techniques and photographic flow visualization techniques effectively reduce visual clutter by using compact oriented structure information to convey three-dimensional structures. Therefore, a key to the effectiveness of our system is using one-dimensional (Schlieren and shadowgraphy) and two-dimensional (silhouette) oriented structural information to reduce visual clutter, while still providing enough three-dimensional structural information for the user's visual system to understand complex three-dimensional flow data. 
By combining these oriented feature visualization techniques with flexible transfer function controls, we can visualize scalar and vector data, allow comparative visualization of flow properties in a succinct, informative manner, and provide continuity for visualizing time-varying datasets.", "fno": "01532858", "keywords": [ "Flow Visualisation", "Rendering Computer Graphics", "Turbulence", "Transfer Functions", "Data Visualisation", "Photography", "Schlieren Systems", "Interactive Systems", "Three Dimensional Turbulent Flow", "Photographic Flow Visualization Techniques", "Interactive Volume Rendering", "Contextual Illustrative Styles", "Two Dimensional Transfer Functions", "Shadowgraphy Shaders", "Structure Enhancement Techniques", "Time Varying Volume Datasets", "Visual Clutter", "Photography", "Data Visualization", "Computer Graphics", "Transfer Functions", "Visual System", "Temperature", "Color", "Stress", "Electric Shock", "Space Vehicles" ], "authors": [ { "affiliation": "Purdue Univ., West Lafayette, IN, USA", "fullName": "N.A. Svakhine", "givenName": "N.A.", "surname": "Svakhine", "__typename": "ArticleAuthorType" }, { "affiliation": "Purdue Univ., West Lafayette, IN, USA", "fullName": "Y. Jang", "givenName": "Y.", "surname": "Jang", "__typename": "ArticleAuthorType" }, { "affiliation": "Purdue Univ., West Lafayette, IN, USA", "fullName": "D. Ebert", "givenName": "D.", "surname": "Ebert", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "K. 
Gaither", "givenName": "K.", "surname": "Gaither", "__typename": "ArticleAuthorType" } ], "idPrefix": "ieee-vis", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2005-01-01T00:00:00", "pubType": "proceedings", "pages": "687,688,689,690,691,692,693,694", "year": "2005", "issn": null, "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "27660073", "articleId": "12OmNrJAdUQ", "__typename": "AdjacentArticleType" }, "next": { "fno": "27660074", "articleId": "12OmNzTH0TG", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/fie/2005/9077/0/01611990", "title": "Work in progress - a Web-based virtual supersonic nozzle module as a visualization and active learning tool for the undergraduate thermodynamics course", "doi": null, "abstractUrl": "/proceedings-article/fie/2005/01611990/12OmNAYGlw7", "parentPublication": { "id": "proceedings/fie/2005/9077/0", "title": "35th Annual Frontiers in Education", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/1995/7187/0/71870329", "title": "Unsteady Flow Volumes", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1995/71870329/12OmNqI04HL", "parentPublication": { "id": "proceedings/ieee-vis/1995/7187/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/27660086", "title": "Illustration-inspired techniques for visualizing time-varying data", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/27660086/12OmNvkplf9", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/mcsi/2014/4324/0/4324a112", "title": "TVD and ENO Applications to Supersonic Flows in 2D", "doi": null, "abstractUrl": "/proceedings-article/mcsi/2014/4324a112/12OmNvxbhJA", "parentPublication": { "id": "proceedings/mcsi/2014/4324/0", "title": "2014 International Conference on Mathematics and Computers in Sciences and in Industry (MCSI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2014/4103/0/4103a333", "title": "Illustration-Inspired Visualization of Blood Flow Dynamics", "doi": null, "abstractUrl": "/proceedings-article/iv/2014/4103a333/12OmNxHJ9n9", "parentPublication": { "id": "proceedings/iv/2014/4103/0", "title": "2014 18th International Conference on Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/01532856", "title": "VolumeShop: an interactive system for direct volume illustration", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/01532856/12OmNzYNNaU", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmcce/2017/2628/0/2628a087", "title": "Parametric Design of Complex Elastic Components", "doi": null, "abstractUrl": "/proceedings-article/icmcce/2017/2628a087/12OmNzahcbV", "parentPublication": { "id": "proceedings/icmcce/2017/2628/0", "title": "2017 Second International Conference on Mechanical, Control and Computer Engineering (ICMCCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/27660087", "title": "Illustration and Photography Inspired Visualization of Flows and Volumes", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/27660087/12OmNzfXaxY", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": 
"Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/11/ttg2011111690", "title": "Autostereoscopic 3D Display with Long Visualization Depth Using Referential Viewing Area-Based Integral Photography", "doi": null, "abstractUrl": "/journal/tg/2011/11/ttg2011111690/13rRUyfKIHG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iseeie/2022/6874/0/687400a218", "title": "Influence of gas pressure on flow field characteristics of the underwater gas jet", "doi": null, "abstractUrl": "/proceedings-article/iseeie/2022/687400a218/1FWmJ4gmjja", "parentPublication": { "id": "proceedings/iseeie/2022/6874/0", "title": "2022 International Symposium on Electrical, Electronics and Information Engineering (ISEEIE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwCJP06", "title": "IEEE Visualization 2005", "acronym": "vis", "groupId": "1000796", "volume": "0", "displayVolume": "0", "year": "2005", "__typename": "ProceedingType" }, "article": { "id": "12OmNwFicZ4", "doi": "10.1109/VISUAL.2005.1532857", "title": "Illustration-inspired techniques for visualizing time-varying data", "normalizedTitle": "Illustration-inspired techniques for visualizing time-varying data", "abstract": "Traditionally, time-varying data has been visualized using snapshots of the individual time steps or an animation of the snapshots shown in a sequential manner. For larger datasets with many time-varying features, animation can be limited in its use, as an observer can only track a limited number of features over the last few frames. Visually inspecting each snapshot is not practical either for a large number of time-steps. We propose new techniques inspired from the illustration literature to convey change over time more effectively in a time-varying dataset. Speedlines are used extensively by cartoonists to convey motion, speed, or change over different panels. Flow ribbons are another technique used by cartoonists to depict motion in a single frame. Strobe silhouettes are used to depict previous positions of an object to convey the previous positions of the object to the user. These illustration-inspired techniques can be used in conjunction with animation to convey change over time.", "abstracts": [ { "abstractType": "Regular", "content": "Traditionally, time-varying data has been visualized using snapshots of the individual time steps or an animation of the snapshots shown in a sequential manner. For larger datasets with many time-varying features, animation can be limited in its use, as an observer can only track a limited number of features over the last few frames. Visually inspecting each snapshot is not practical either for a large number of time-steps. 
We propose new techniques inspired from the illustration literature to convey change over time more effectively in a time-varying dataset. Speedlines are used extensively by cartoonists to convey motion, speed, or change over different panels. Flow ribbons are another technique used by cartoonists to depict motion in a single frame. Strobe silhouettes are used to depict previous positions of an object to convey the previous positions of the object to the user. These illustration-inspired techniques can be used in conjunction with animation to convey change over time.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Traditionally, time-varying data has been visualized using snapshots of the individual time steps or an animation of the snapshots shown in a sequential manner. For larger datasets with many time-varying features, animation can be limited in its use, as an observer can only track a limited number of features over the last few frames. Visually inspecting each snapshot is not practical either for a large number of time-steps. We propose new techniques inspired from the illustration literature to convey change over time more effectively in a time-varying dataset. Speedlines are used extensively by cartoonists to convey motion, speed, or change over different panels. Flow ribbons are another technique used by cartoonists to depict motion in a single frame. Strobe silhouettes are used to depict previous positions of an object to convey the previous positions of the object to the user. 
These illustration-inspired techniques can be used in conjunction with animation to convey change over time.", "fno": "01532857", "keywords": [ "Data Visualisation", "Computer Animation", "Motion Estimation", "Rendering Computer Graphics", "Feature Extraction", "Time Varying Data Visualization", "Computer Animation", "Flow Ribbons", "Strobe Silhouettes", "Illustration Inspired Techniques", "Data Visualization", "Feature Extraction", "Animation", "Birds", "Weather Forecasting", "Ultrasonic Imaging", "Data Analysis", "Algorithm Design And Analysis", "Data Mining", "Humans" ], "authors": [ { "affiliation": "Maryland Univ., Baltimore, MD, USA", "fullName": "A. Joshi", "givenName": "A.", "surname": "Joshi", "__typename": "ArticleAuthorType" }, { "affiliation": "Maryland Univ., Baltimore, MD, USA", "fullName": "P. Rheingans", "givenName": "P.", "surname": "Rheingans", "__typename": "ArticleAuthorType" } ], "idPrefix": "vis", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2005-01-01T00:00:00", "pubType": "proceedings", "pages": "679,680,681,682,683,684,685,686", "year": "2005", "issn": null, "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "01532849", "articleId": "12OmNyv7m6G", "__typename": "AdjacentArticleType" }, "next": { "fno": "01532859", "articleId": "12OmNxAlzZw", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ieee-vis/2005/2766/0/01532858", "title": "Illustration and photography inspired visualization of flows and volumes", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/01532858/12OmNscxjaI", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/ieee-vis/2005/2766/0/27660086", "title": "Illustration-inspired techniques for visualizing time-varying data", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/27660086/12OmNvkplf9", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2014/4103/0/4103a333", "title": "Illustration-Inspired Visualization of Blood Flow Dynamics", "doi": null, "abstractUrl": "/proceedings-article/iv/2014/4103a333/12OmNxHJ9n9", "parentPublication": { "id": "proceedings/iv/2014/4103/0", "title": "2014 18th International Conference on Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/27660087", "title": "Illustration and Photography Inspired Visualization of Flows and Volumes", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/27660087/12OmNzfXaxY", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2012/4829/0/4829a198", "title": "Representing and Manipulating Mesh-Based Character Animations", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2012/4829a198/12OmNznkKdG", "parentPublication": { "id": "proceedings/sibgrapi/2012/4829/0", "title": "2012 25th SIBGRAPI Conference on Graphics, Patterns and Images", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2009/05/ttg2009050709", "title": "Case Study on Visualizing Hurricanes Using Illustration-Inspired Techniques", "doi": null, "abstractUrl": "/journal/tg/2009/05/ttg2009050709/13rRUwI5UfZ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2006/05/v1109", "title": "Dynamic View Selection for Time-Varying Volumes", "doi": null, "abstractUrl": "/journal/tg/2006/05/v1109/13rRUwcAqq6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/1998/02/v0098", "title": "A New Line Integral Convolution Algorithm for Visualizing Time-Varying Flow Fields", "doi": null, "abstractUrl": "/journal/tg/1998/02/v0098/13rRUxBa55T", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/01/07539342", "title": "Time-Hierarchical Clustering and Visualization of Weather Forecast Ensembles", "doi": null, "abstractUrl": "/journal/tg/2017/01/07539342/13rRUxYIN4d", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/01/09914804", "title": "Animated Vega-Lite: Unifying Animation with a Grammar of Interactive Graphics", "doi": null, "abstractUrl": "/journal/tg/2023/01/09914804/1Hmgc5h7Clq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNCmpcNk", "title": "Visualization Conference, IEEE", "acronym": "ieee-vis", "groupId": "1000796", "volume": "0", "displayVolume": "0", "year": "2005", "__typename": "ProceedingType" }, "article": { "id": "12OmNzfXaxY", "doi": "10.1109/VIS.2005.53", "title": "Illustration and Photography Inspired Visualization of Flows and Volumes", "normalizedTitle": "Illustration and Photography Inspired Visualization of Flows and Volumes", "abstract": "Understanding and analyzing complex volumetrically varying data is a difficult problem. Many computational visualization techniques have had only limited success in succinctly portraying the structure of three-dimensional turbulent flow. Motivated by both the extensive history and success of illustration and photographic flow visualization techniques, we have developed a new interactive volume rendering and visualization system for flows and volumes that simulates and enhances traditional illustration, experimental advection, and photographic flow visualization techniques. Our system uses a combination of varying focal and contextual illustrative styles, new advanced two-dimensional transfer functions, enhanced Schlieren and shadowgraphy shaders, and novel oriented structure enhancement techniques to allow interactive visualization, exploration, and comparative analysis of scalar, vector, and time-varying volume datasets. Both traditional illustration techniques and photographic flow visualization techniques effectively reduce visual clutter by using compact oriented structure information to convey threedimensional structures. Therefore, a key to the effectiveness of our system is using one-dimensional (Schlieren and shadowgraphy) and two-dimensional (silhouette) oriented structural information to reduce visual clutter, while still providing enough three-dimensional structural information for the user's visual system to understand complex three-dimensional flow data. 
By combining these oriented feature visualization techniques with flexible transfer function controls, we can visualize scalar and vector data, allow comparative visualization of flow properties in a succinct, informative manner, and provide continuity for visualizing time-varying datasets.", "abstracts": [ { "abstractType": "Regular", "content": "Understanding and analyzing complex volumetrically varying data is a difficult problem. Many computational visualization techniques have had only limited success in succinctly portraying the structure of three-dimensional turbulent flow. Motivated by both the extensive history and success of illustration and photographic flow visualization techniques, we have developed a new interactive volume rendering and visualization system for flows and volumes that simulates and enhances traditional illustration, experimental advection, and photographic flow visualization techniques. Our system uses a combination of varying focal and contextual illustrative styles, new advanced two-dimensional transfer functions, enhanced Schlieren and shadowgraphy shaders, and novel oriented structure enhancement techniques to allow interactive visualization, exploration, and comparative analysis of scalar, vector, and time-varying volume datasets. Both traditional illustration techniques and photographic flow visualization techniques effectively reduce visual clutter by using compact oriented structure information to convey threedimensional structures. Therefore, a key to the effectiveness of our system is using one-dimensional (Schlieren and shadowgraphy) and two-dimensional (silhouette) oriented structural information to reduce visual clutter, while still providing enough three-dimensional structural information for the user's visual system to understand complex three-dimensional flow data. 
By combining these oriented feature visualization techniques with flexible transfer function controls, we can visualize scalar and vector data, allow comparative visualization of flow properties in a succinct, informative manner, and provide continuity for visualizing time-varying datasets.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Understanding and analyzing complex volumetrically varying data is a difficult problem. Many computational visualization techniques have had only limited success in succinctly portraying the structure of three-dimensional turbulent flow. Motivated by both the extensive history and success of illustration and photographic flow visualization techniques, we have developed a new interactive volume rendering and visualization system for flows and volumes that simulates and enhances traditional illustration, experimental advection, and photographic flow visualization techniques. Our system uses a combination of varying focal and contextual illustrative styles, new advanced two-dimensional transfer functions, enhanced Schlieren and shadowgraphy shaders, and novel oriented structure enhancement techniques to allow interactive visualization, exploration, and comparative analysis of scalar, vector, and time-varying volume datasets. Both traditional illustration techniques and photographic flow visualization techniques effectively reduce visual clutter by using compact oriented structure information to convey threedimensional structures. Therefore, a key to the effectiveness of our system is using one-dimensional (Schlieren and shadowgraphy) and two-dimensional (silhouette) oriented structural information to reduce visual clutter, while still providing enough three-dimensional structural information for the user's visual system to understand complex three-dimensional flow data. 
By combining these oriented feature visualization techniques with flexible transfer function controls, we can visualize scalar and vector data, allow comparative visualization of flow properties in a succinct, informative manner, and provide continuity for visualizing time-varying datasets.", "fno": "27660087", "keywords": [], "authors": [ { "affiliation": "Purdue University", "fullName": "Nikolai A. Svakhine", "givenName": "Nikolai A.", "surname": "Svakhine", "__typename": "ArticleAuthorType" }, { "affiliation": "Purdue University", "fullName": "Yun Jang", "givenName": "Yun", "surname": "Jang", "__typename": "ArticleAuthorType" }, { "affiliation": "Purdue University", "fullName": "David Ebert", "givenName": "David", "surname": "Ebert", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Texas at Austin", "fullName": "Kelly Gaither", "givenName": "Kelly", "surname": "Gaither", "__typename": "ArticleAuthorType" } ], "idPrefix": "ieee-vis", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2005-10-01T00:00:00", "pubType": "proceedings", "pages": "87", "year": "2005", "issn": null, "isbn": "0-7803-9462-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "27660086", "articleId": "12OmNvkplf9", "__typename": "AdjacentArticleType" }, "next": { "fno": "27660088", "articleId": "12OmNAkWvL3", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ieee-vis/1995/7187/0/71870329", "title": "Unsteady Flow Volumes", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1995/71870329/12OmNqI04HL", "parentPublication": { "id": "proceedings/ieee-vis/1995/7187/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/01532858", "title": "Illustration and 
photography inspired visualization of flows and volumes", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/01532858/12OmNscxjaI", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/27660086", "title": "Illustration-inspired techniques for visualizing time-varying data", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/27660086/12OmNvkplf9", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visual/1990/2083/0/00146395", "title": "Automatic illustration of 3D geometric models: surfaces", "doi": null, "abstractUrl": "/proceedings-article/visual/1990/00146395/12OmNx6g6h6", "parentPublication": { "id": "proceedings/visual/1990/2083/0", "title": "1990 First IEEE Conference on Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2014/4103/0/4103a333", "title": "Illustration-Inspired Visualization of Blood Flow Dynamics", "doi": null, "abstractUrl": "/proceedings-article/iv/2014/4103a333/12OmNxHJ9n9", "parentPublication": { "id": "proceedings/iv/2014/4103/0", "title": "2014 18th International Conference on Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/01532856", "title": "VolumeShop: an interactive system for direct volume illustration", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/01532856/12OmNzYNNaU", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tg/2009/05/ttg2009050709", "title": "Case Study on Visualizing Hurricanes Using Illustration-Inspired Techniques", "doi": null, "abstractUrl": "/journal/tg/2009/05/ttg2009050709/13rRUwI5UfZ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2009/01/ttg2009010077", "title": "Illustration-Inspired Depth Enhanced Volumetric Medical Visualization", "doi": null, "abstractUrl": "/journal/tg/2009/01/ttg2009010077/13rRUwjGoFR", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2005/03/mcg2005030031", "title": "Illustration Motifs for Effective Medical Volume Illustration", "doi": null, "abstractUrl": "/magazine/cg/2005/03/mcg2005030031/13rRUyft7x2", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2006/05/v0877", "title": "Real-Time Illustration of Vascular Structures", "doi": null, "abstractUrl": "/journal/tg/2006/05/v0877/13rRUytF41r", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxdVh2l", "title": "2006 2nd IEEE International Conference on Space Mission Challenges for Information Technology", "acronym": "smc-it", "groupId": "1002093", "volume": "0", "displayVolume": "0", "year": "2006", "__typename": "ProceedingType" }, "article": { "id": "12OmNvAiSuA", "doi": "10.1109/SMC-IT.2006.67", "title": "SIMSCAPE Terrain Modeling Toolkit", "normalizedTitle": "SIMSCAPE Terrain Modeling Toolkit", "abstract": "Planetary space mission applications involving landers and surface exploration vehicles make extensive use of terrain models within their simulation testbeds. Such terrain models are large, complex and have a variety of attributes including topography, reflectivity, soil mechanics, and hazard information. Sources for the terrain models include planetary data archives, field tests, and analytically constructed models. Simulation uses of such models include surface rover vehicles? kinematics and dynamics models, instrument models, camera models and robotic arm models. This paper describes the SIMSCAPE middleware toolkit for providing a common infrastructure to represent terrain model data from multiple data sources and make them available to simulation applications. SIMSCAPE simplifies the overall simulation design by eliminating the traditional need for custom terrain model interfaces to terrain data sources for simulation users. SIMSCAPE provides a collection of libraries and tools to use and manage terrain environment models within the simulation applications.", "abstracts": [ { "abstractType": "Regular", "content": "Planetary space mission applications involving landers and surface exploration vehicles make extensive use of terrain models within their simulation testbeds. Such terrain models are large, complex and have a variety of attributes including topography, reflectivity, soil mechanics, and hazard information. 
Sources for the terrain models include planetary data archives, field tests, and analytically constructed models. Simulation uses of such models include surface rover vehicles? kinematics and dynamics models, instrument models, camera models and robotic arm models. This paper describes the SIMSCAPE middleware toolkit for providing a common infrastructure to represent terrain model data from multiple data sources and make them available to simulation applications. SIMSCAPE simplifies the overall simulation design by eliminating the traditional need for custom terrain model interfaces to terrain data sources for simulation users. SIMSCAPE provides a collection of libraries and tools to use and manage terrain environment models within the simulation applications.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Planetary space mission applications involving landers and surface exploration vehicles make extensive use of terrain models within their simulation testbeds. Such terrain models are large, complex and have a variety of attributes including topography, reflectivity, soil mechanics, and hazard information. Sources for the terrain models include planetary data archives, field tests, and analytically constructed models. Simulation uses of such models include surface rover vehicles? kinematics and dynamics models, instrument models, camera models and robotic arm models. This paper describes the SIMSCAPE middleware toolkit for providing a common infrastructure to represent terrain model data from multiple data sources and make them available to simulation applications. SIMSCAPE simplifies the overall simulation design by eliminating the traditional need for custom terrain model interfaces to terrain data sources for simulation users. 
SIMSCAPE provides a collection of libraries and tools to use and manage terrain environment models within the simulation applications.", "fno": "26440149", "keywords": [], "authors": [ { "affiliation": "Jet Propulsion Laboratory, USA", "fullName": "Abhinandan Jain", "givenName": "Abhinandan", "surname": "Jain", "__typename": "ArticleAuthorType" }, { "affiliation": "Jet Propulsion Laboratory, USA", "fullName": "Jonathan Cameron", "givenName": "Jonathan", "surname": "Cameron", "__typename": "ArticleAuthorType" }, { "affiliation": "Jet Propulsion Laboratory, USA", "fullName": "Christopher Lim", "givenName": "Christopher", "surname": "Lim", "__typename": "ArticleAuthorType" }, { "affiliation": "Jet Propulsion Laboratory, USA", "fullName": "John Guineau", "givenName": "John", "surname": "Guineau", "__typename": "ArticleAuthorType" } ], "idPrefix": "smc-it", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2006-07-01T00:00:00", "pubType": "proceedings", "pages": "149-156", "year": "2006", "issn": null, "isbn": "0-7695-2644-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "26440140", "articleId": "12OmNBRbktO", "__typename": "AdjacentArticleType" }, "next": { "fno": "26440157", "articleId": "12OmNBaT62m", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/crv/2006/2542/0/25420061", "title": "3D Terrain Modeling for Rover Localization and Navigation", "doi": null, "abstractUrl": "/proceedings-article/crv/2006/25420061/12OmNAQrYBf", "parentPublication": { "id": "proceedings/crv/2006/2542/0", "title": "The 3rd Canadian Conference on Computer and Robot Vision (CRV'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109a999", "title": "Modeling and Generalization of Discrete Morse Terrain 
Decompositions", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109a999/12OmNBTs7BJ", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/supercomputing/1996/854/0/01392885", "title": "STREN: A Highly Scalable Parallel Stereo Terrain Renderer for Planetary Mission Simulations", "doi": null, "abstractUrl": "/proceedings-article/supercomputing/1996/01392885/12OmNC8MsyN", "parentPublication": { "id": "proceedings/supercomputing/1996/854/0", "title": "Proceedings of the 1996 ACM/IEEE Conference on Supercomputing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/smc-it/2011/4446/0/4446a177", "title": "Large Terrain Modeling and Visualization for Planets", "doi": null, "abstractUrl": "/proceedings-article/smc-it/2011/4446a177/12OmNy50gji", "parentPublication": { "id": "proceedings/smc-it/2011/4446/0", "title": "Space Mission Challenges for Information Technology, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sc/1996/2642/0/26420008", "title": "STREN: A Highly Scalable Parallel Stereo Terrain Renderer for Planetary Mission Simulations", "doi": null, "abstractUrl": "/proceedings-article/sc/1996/26420008/12OmNzBwGyD", "parentPublication": { "id": "proceedings/sc/1996/2642/0", "title": "SC Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2006/0366/0/04036569", "title": "Dynamic View-Dependent Multiresolution Terrain Visualization", "doi": null, "abstractUrl": "/proceedings-article/icme/2006/04036569/12OmNzG4gs3", "parentPublication": { "id": "proceedings/icme/2006/0366/0", "title": "2006 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2015/7673/0/7673a166", "title": "Advances in Physically-Based Modeling of Deformable Soil for Real-Time Operator Training Simulators", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2015/7673a166/12OmNzVoBvk", "parentPublication": { "id": "proceedings/icvrv/2015/7673/0", "title": "2015 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2007/2786/0/27860243", "title": "Terrain Modelling for Planetary Exploration", "doi": null, "abstractUrl": "/proceedings-article/crv/2007/27860243/12OmNzwHvdp", "parentPublication": { "id": "proceedings/crv/2007/2786/0", "title": "Fourth Canadian Conference on Computer and Robot Vision (CRV '07)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/1992/2910/0/00201666", "title": "Robust disparity estimation in terrain modeling for spacecraft navigation", "doi": null, "abstractUrl": "/proceedings-article/icpr/1992/00201666/12OmNzwpUpq", "parentPublication": { "id": "proceedings/icpr/1992/2910/0", "title": "1992 11th IAPR International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2010/01/mcg2010010080", "title": "Dynamic Terrain for Multiuser Real-Time Environments", "doi": null, "abstractUrl": "/magazine/cg/2010/01/mcg2010010080/13rRUwhpBSy", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxWuirm", "title": "Space Mission Challenges for Information Technology, IEEE International Conference on", "acronym": "smc-it", "groupId": "1002093", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNy50gji", "doi": "10.1109/SMC-IT.2011.11", "title": "Large Terrain Modeling and Visualization for Planets", "normalizedTitle": "Large Terrain Modeling and Visualization for Planets", "abstract": "Physics-based simulations are actively used in the design, testing, and operations phases of surface and near-surface planetary space missions. One of the challenges in real-time simulations is the ability to handle large multi-resolution terrain data sets within models as well as for visualization. In this paper, we describe special techniques that we have developed for visualization, paging, and data storage for dealing with these large data sets. The visualization technique uses a real-time GPU-based continuous level-of-detail technique that delivers multiple frames a second performance even for planetary scale terrain model sizes.", "abstracts": [ { "abstractType": "Regular", "content": "Physics-based simulations are actively used in the design, testing, and operations phases of surface and near-surface planetary space missions. One of the challenges in real-time simulations is the ability to handle large multi-resolution terrain data sets within models as well as for visualization. In this paper, we describe special techniques that we have developed for visualization, paging, and data storage for dealing with these large data sets. 
The visualization technique uses a real-time GPU-based continuous level-of-detail technique that delivers multiple frames a second performance even for planetary scale terrain model sizes.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Physics-based simulations are actively used in the design, testing, and operations phases of surface and near-surface planetary space missions. One of the challenges in real-time simulations is the ability to handle large multi-resolution terrain data sets within models as well as for visualization. In this paper, we describe special techniques that we have developed for visualization, paging, and data storage for dealing with these large data sets. The visualization technique uses a real-time GPU-based continuous level-of-detail technique that delivers multiple frames a second performance even for planetary scale terrain model sizes.", "fno": "4446a177", "keywords": [], "authors": [ { "affiliation": null, "fullName": "Steven Myint", "givenName": "Steven", "surname": "Myint", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Abhinandan Jain", "givenName": "Abhinandan", "surname": "Jain", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jonathan Cameron", "givenName": "Jonathan", "surname": "Cameron", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Christopher Lim", "givenName": "Christopher", "surname": "Lim", "__typename": "ArticleAuthorType" } ], "idPrefix": "smc-it", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-08-01T00:00:00", "pubType": "proceedings", "pages": "177-183", "year": "2011", "issn": null, "isbn": "978-0-7695-4446-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4446a169", "articleId": "12OmNzn38WM", "__typename": "AdjacentArticleType" }, "next": { "fno": "4446a184", "articleId": 
"12OmNAWYKJV", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/crv/2006/2542/0/25420061", "title": "3D Terrain Modeling for Rover Localization and Navigation", "doi": null, "abstractUrl": "/proceedings-article/crv/2006/25420061/12OmNAQrYBf", "parentPublication": { "id": "proceedings/crv/2006/2542/0", "title": "The 3rd Canadian Conference on Computer and Robot Vision (CRV'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgiv/2007/2928/0/29280481", "title": "An Improved Approach on Visualization of Large-Scale Terrain Surface", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2007/29280481/12OmNAk5HPS", "parentPublication": { "id": "proceedings/cgiv/2007/2928/0", "title": "Computer Graphics, Imaging and Visualisation (CGIV 2007)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icie/2009/3679/2/3679b090", "title": "An Algorithm for Real-Time Visualization of Large-Scale Terrain", "doi": null, "abstractUrl": "/proceedings-article/icie/2009/3679b090/12OmNApcuhq", "parentPublication": { "id": "icie/2009/3679/2", "title": "2009 WASE International Conference on Information Engineering (ICIE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icise/2009/3887/0/pid980954", "title": "Terrain Visualization and Application Based on 3D GIS", "doi": null, "abstractUrl": "/proceedings-article/icise/2009/pid980954/12OmNC36tPH", "parentPublication": { "id": "proceedings/icise/2009/3887/0", "title": "Information Science and Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/smc-it/2006/2644/0/26440149", "title": "SIMSCAPE Terrain Modeling Toolkit", "doi": null, "abstractUrl": "/proceedings-article/smc-it/2006/26440149/12OmNvAiSuA", "parentPublication": { 
"id": "proceedings/smc-it/2006/2644/0", "title": "2006 2nd IEEE International Conference on Space Mission Challenges for Information Technology", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dmamh/2007/3065/0/30650218", "title": "A New Method for Dynamic-Loading Large Terrain Dataset Visualization in Flight Simulation", "doi": null, "abstractUrl": "/proceedings-article/dmamh/2007/30650218/12OmNwMob63", "parentPublication": { "id": "proceedings/dmamh/2007/3065/0", "title": "Digital Media and its Application in Museum &amp; Heritage/Digital Media and its Application in Museum &amp; Heritage, Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/smc-it/2009/3637/0/3637a237", "title": "Dspace: Real-Time 3D Visualization System for Spacecraft Dynamics Simulation", "doi": null, "abstractUrl": "/proceedings-article/smc-it/2009/3637a237/12OmNwkR5vV", "parentPublication": { "id": "proceedings/smc-it/2009/3637/0", "title": "2009 Third IEEE International Conference on Space Mission Challenges for Information Technology (SMC-IT 2009)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/1998/9176/0/91760019", "title": "Large Scale Terrain Visualization Using the Restricted Quadtree Triangulation", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1998/91760019/12OmNxwWozX", "parentPublication": { "id": "proceedings/ieee-vis/1998/9176/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcasia/2004/2138/0/21380280", "title": "VIsualization for HPC Data - Large Terrain Model", "doi": null, "abstractUrl": "/proceedings-article/hpcasia/2004/21380280/12OmNy50g4O", "parentPublication": { "id": "proceedings/hpcasia/2004/2138/0", "title": "High Performance Computing and Grid in Asia Pacific Region, International 
Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2007/2786/0/27860243", "title": "Terrain Modelling for Planetary Exploration", "doi": null, "abstractUrl": "/proceedings-article/crv/2007/27860243/12OmNzwHvdp", "parentPublication": { "id": "proceedings/crv/2007/2786/0", "title": "Fourth Canadian Conference on Computer and Robot Vision (CRV '07)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzZmZqZ", "title": "2008 Canadian Conference on Computer and Robot Vision", "acronym": "crv", "groupId": "1001794", "volume": "0", "displayVolume": "0", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNyywxAC", "doi": "10.1109/CRV.2008.46", "title": "Path Planning for Planetary Exploration", "normalizedTitle": "Path Planning for Planetary Exploration", "abstract": "In this paper we present the work done at the Canadian Space Agency on the problem of planetary exploration. One of the main goals is the over-the-horizon navigation of a mobile robot on a Mars like environment. A key component is the ability to plan a path using maps of different resolutions and also to refine/replan when more data??becomes available. Our algorithms on path planning and??path segmentation are presented together with results??from two years of??experiments in realistic conditions.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper we present the work done at the Canadian Space Agency on the problem of planetary exploration. One of the main goals is the over-the-horizon navigation of a mobile robot on a Mars like environment. A key component is the ability to plan a path using maps of different resolutions and also to refine/replan when more data??becomes available. Our algorithms on path planning and??path segmentation are presented together with results??from two years of??experiments in realistic conditions.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper we present the work done at the Canadian Space Agency on the problem of planetary exploration. One of the main goals is the over-the-horizon navigation of a mobile robot on a Mars like environment. A key component is the ability to plan a path using maps of different resolutions and also to refine/replan when more data??becomes available. 
Our algorithms on path planning and??path segmentation are presented together with results??from two years of??experiments in realistic conditions.", "fno": "3153a061", "keywords": [ "Space Robotics", "Planetary Exploration", "Path Planning", "Terrain Modelling" ], "authors": [ { "affiliation": null, "fullName": "Ioannis Rekleitis", "givenName": "Ioannis", "surname": "Rekleitis", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jean-Luc Bedwani", "givenName": "Jean-Luc", "surname": "Bedwani", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Erick Dupuis", "givenName": "Erick", "surname": "Dupuis", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Pierre Allard", "givenName": "Pierre", "surname": "Allard", "__typename": "ArticleAuthorType" } ], "idPrefix": "crv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-05-01T00:00:00", "pubType": "proceedings", "pages": "61-68", "year": "2008", "issn": null, "isbn": "978-0-7695-3153-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3153a053", "articleId": "12OmNyuPLc8", "__typename": "AdjacentArticleType" }, "next": { "fno": "3153a071", "articleId": "12OmNAq3hQR", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/robot/1992/2720/0/00220266", "title": "Robotic vehicles for planetary exploration", "doi": null, "abstractUrl": "/proceedings-article/robot/1992/00220266/12OmNAtK4qP", "parentPublication": { "id": "proceedings/robot/1992/2720/0", "title": "Proceedings 1992 IEEE International Conference on Robotics and Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ahs/2008/3166/0/3166a263", "title": "Pennies from Heaven: A Retrospective on the Use of Wireless Sensor Networks for 
Planetary Exploration", "doi": null, "abstractUrl": "/proceedings-article/ahs/2008/3166a263/12OmNBh8gUm", "parentPublication": { "id": "proceedings/ahs/2008/3166/0", "title": "Adaptive Hardware and Systems, NASA/ESA Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2001/0948/0/09480183", "title": "Immersive Environment Technologies for Planetary Exploration", "doi": null, "abstractUrl": "/proceedings-article/vr/2001/09480183/12OmNqBtiZv", "parentPublication": { "id": "proceedings/vr/2001/0948/0", "title": "Virtual Reality Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itcs/2009/3688/1/3688a329", "title": "Analysis of Obstacle-Climbing Capability of Planetary Exploration Rover with Rocker-Bogie Structure", "doi": null, "abstractUrl": "/proceedings-article/itcs/2009/3688a329/12OmNwMXnuw", "parentPublication": { "id": "proceedings/itcs/2009/3688/1", "title": "Information Technology and Computer Science, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aero/2011/7350/0/05747283", "title": "Study on strategies for planetary exploration within the HG-project \"Planetary Evolution and Life\"", "doi": null, "abstractUrl": "/proceedings-article/aero/2011/05747283/12OmNwdbV58", "parentPublication": { "id": "proceedings/aero/2011/7350/0", "title": "IEEE Aerospace Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ihmsc/2011/4444/2/4444b201", "title": "Mission Planning Method for Planetary Rover Based on Path Planning", "doi": null, "abstractUrl": "/proceedings-article/ihmsc/2011/4444b201/12OmNwpoFIa", "parentPublication": { "id": "proceedings/ihmsc/2011/4444/2", "title": "Intelligent Human-Machine Systems and Cybernetics, International Conference on", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/smc-it/2009/3637/0/3637a106", "title": "A Path Planning System based on 3D Occlusion Detection for Lunar Exploration Rovers", "doi": null, "abstractUrl": "/proceedings-article/smc-it/2009/3637a106/12OmNzWfp47", "parentPublication": { "id": "proceedings/smc-it/2009/3637/0", "title": "2009 Third IEEE International Conference on Space Mission Challenges for Information Technology (SMC-IT 2009)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2007/2786/0/27860243", "title": "Terrain Modelling for Planetary Exploration", "doi": null, "abstractUrl": "/proceedings-article/crv/2007/27860243/12OmNzwHvdp", "parentPublication": { "id": "proceedings/crv/2007/2786/0", "title": "Fourth Canadian Conference on Computer and Robot Vision (CRV '07)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2006/04/mcg2006040046", "title": "Adviser: Immersive Field Work for Planetary Geoscientists", "doi": null, "abstractUrl": "/magazine/cg/2006/04/mcg2006040046/13rRUzpzeEj", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBgQFLU", "title": "2006 International Conference on Information Acquisition", "acronym": "icia", "groupId": "1002411", "volume": "0", "displayVolume": "0", "year": "2006", "__typename": "ProceedingType" }, "article": { "id": "12OmNz5s0Jj", "doi": "10.1109/ICIA.2006.305896", "title": "Automatic Generation of Digital Terrain Elevations with Single Satellite Imagery", "normalizedTitle": "Automatic Generation of Digital Terrain Elevations with Single Satellite Imagery", "abstract": "More and more engineering applications need denser and more accurate Digital Terrain Model (DTM) height data. Whereas, collecting additional height data in the field, if not impossible, is either expensive or time consuming or both. Advanced space technology has provided much single high-resolution satellite imageries almost worldwide. This paper discusses the idea of using Shape From Shading (SFS) methods with single high resolution imagery. Regular grids of digital terrain elevations are generated by a four-directional weighed algorithm we propose in this paper. Preliminary results are very encouraging and the methodology is going to be implemented with real satellite imagery and parallel computations", "abstracts": [ { "abstractType": "Regular", "content": "More and more engineering applications need denser and more accurate Digital Terrain Model (DTM) height data. Whereas, collecting additional height data in the field, if not impossible, is either expensive or time consuming or both. Advanced space technology has provided much single high-resolution satellite imageries almost worldwide. This paper discusses the idea of using Shape From Shading (SFS) methods with single high resolution imagery. Regular grids of digital terrain elevations are generated by a four-directional weighed algorithm we propose in this paper. 
Preliminary results are very encouraging and the methodology is going to be implemented with real satellite imagery and parallel computations", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "More and more engineering applications need denser and more accurate Digital Terrain Model (DTM) height data. Whereas, collecting additional height data in the field, if not impossible, is either expensive or time consuming or both. Advanced space technology has provided much single high-resolution satellite imageries almost worldwide. This paper discusses the idea of using Shape From Shading (SFS) methods with single high resolution imagery. Regular grids of digital terrain elevations are generated by a four-directional weighed algorithm we propose in this paper. Preliminary results are very encouraging and the methodology is going to be implemented with real satellite imagery and parallel computations", "fno": "04097829", "keywords": [ "Four Directional Weighed Algorithm", "Digital Terrain Elevation", "Satellite Imagery", "Digital Terrain Model", "Shape From Shading Method" ], "authors": [ { "affiliation": "Technique Dev. Center, Southwest China Inst. of Electron. Technol., Sichuan", "fullName": "Hongbo Yu", "givenName": null, "surname": "Hongbo Yu", "__typename": "ArticleAuthorType" }, { "affiliation": "Technique Dev. Center, Southwest China Inst. of Electron. 
Technol., Sichuan", "fullName": "Huaixin Chen", "givenName": null, "surname": "Huaixin Chen", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Liping Lei", "givenName": null, "surname": "Liping Lei", "__typename": "ArticleAuthorType" } ], "idPrefix": "icia", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2006-08-01T00:00:00", "pubType": "proceedings", "pages": "1096-1100", "year": "2006", "issn": null, "isbn": "1-4244-0528-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "04097828", "articleId": "12OmNyRPgFO", "__typename": "AdjacentArticleType" }, "next": { "fno": "04097830", "articleId": "12OmNyugyKX", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/isdea/2012/4608/0/4608b430", "title": "High Resolution Satellite Imagery Rectification Using Bi-linear Interpolation Method for Geometric Data Extraction", "doi": null, "abstractUrl": "/proceedings-article/isdea/2012/4608b430/12OmNARRYCR", "parentPublication": { "id": "proceedings/isdea/2012/4608/0", "title": "2012 Second International Conference on Intelligent System Design and Engineering Application", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icig/2009/3883/0/3883a613", "title": "Automatic Registration of High-Resolution Multispectral Imageries from Band-Reconfigurable Imaging System on Board Unmanned Airship", "doi": null, "abstractUrl": "/proceedings-article/icig/2009/3883a613/12OmNBCqbzL", "parentPublication": { "id": "proceedings/icig/2009/3883/0", "title": "Image and Graphics, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ettandgrs/2008/3563/1/3563a242", "title": "Synthesizing Large-Scale Virtual Terrain from Image Atlas", "doi": null, 
"abstractUrl": "/proceedings-article/ettandgrs/2008/3563a242/12OmNBI6ace", "parentPublication": { "id": "proceedings/ettandgrs/2008/3563/1", "title": "Education Technology and Training &amp; Geoscience and Remote Sensing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2014/4308/0/4308a307", "title": "Automatic Geo-location Correction of Satellite Imagery", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2014/4308a307/12OmNBNM96n", "parentPublication": { "id": "proceedings/cvprw/2014/4308/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/jcai/2009/3615/0/3615a345", "title": "Applying Multi-temporal Satellite Imageries to Estimate Chlorophyll-a Concentration in Feitsui Reservoir Using ANNs", "doi": null, "abstractUrl": "/proceedings-article/jcai/2009/3615a345/12OmNBO3Kf2", "parentPublication": { "id": "proceedings/jcai/2009/3615/0", "title": "2009 International Joint Conference on Artificial Intelligence (JCAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391a738", "title": "Minimal Solvers for 3D Geometry from Satellite Imagery", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391a738/12OmNwE9OzQ", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ettandgrs/2008/3563/1/3563a583", "title": "The Fine-Scale Direct Solar Radiation Derived for the Rugged Terrain in Chongqing City", "doi": null, "abstractUrl": "/proceedings-article/ettandgrs/2008/3563a583/12OmNx9WSUy", "parentPublication": { "id": "proceedings/ettandgrs/2008/3563/1", "title": "Education Technology and Training &amp; Geoscience 
and Remote Sensing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2011/4548/0/4548a064", "title": "Real-Time Terrain Modeling Using CPU-GPU Coupled Computation", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2011/4548a064/12OmNzn38KP", "parentPublication": { "id": "proceedings/sibgrapi/2011/4548/0", "title": "2011 24th SIBGRAPI Conference on Graphics, Patterns and Images", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2007/04/v0834", "title": "Terrain Synthesis from Digital Elevation Models", "doi": null, "abstractUrl": "/journal/tg/2007/04/v0834/13rRUwInvJa", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/1998/01/v0082", "title": "Fast Horizon Computation at All Points of a Terrain With Visibility and Shading Applications", "doi": null, "abstractUrl": "/journal/tg/1998/01/v0082/13rRUx0geuY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNvEyR7V", "title": "SC Conference", "acronym": "sc", "groupId": "1000729", "volume": "0", "displayVolume": "0", "year": "1996", "__typename": "ProceedingType" }, "article": { "id": "12OmNzBwGyD", "doi": "10.1109/SC.1996.43", "title": "STREN: A Highly Scalable Parallel Stereo Terrain Renderer for Planetary Mission Simulations", "normalizedTitle": "STREN: A Highly Scalable Parallel Stereo Terrain Renderer for Planetary Mission Simulations", "abstract": "In this paper, we describe STREN, a parallel stereo renderer for fixed-location terrain rendering tasks required for the simulation of planetary exploration missions. The renderer is based on a novel spatial data representation, called the TANPO map. This data representation stores terrain data using a simple and compact structure and provides excellent locality for such rendering applications. Experimental results show that the renderer not only performs very well, but also scales perfectly to different numbers of processors. Examples of the rendered result is show below using the red/blue stereo display method. Click on the image to view an stereo MPEG movie (2 MBytes).", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we describe STREN, a parallel stereo renderer for fixed-location terrain rendering tasks required for the simulation of planetary exploration missions. The renderer is based on a novel spatial data representation, called the TANPO map. This data representation stores terrain data using a simple and compact structure and provides excellent locality for such rendering applications. Experimental results show that the renderer not only performs very well, but also scales perfectly to different numbers of processors. Examples of the rendered result is show below using the red/blue stereo display method. 
Click on the image to view an stereo MPEG movie (2 MBytes).", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we describe STREN, a parallel stereo renderer for fixed-location terrain rendering tasks required for the simulation of planetary exploration missions. The renderer is based on a novel spatial data representation, called the TANPO map. This data representation stores terrain data using a simple and compact structure and provides excellent locality for such rendering applications. Experimental results show that the renderer not only performs very well, but also scales perfectly to different numbers of processors. Examples of the rendered result is show below using the red/blue stereo display method. Click on the image to view an stereo MPEG movie (2 MBytes).", "fno": "26420008", "keywords": [ "Terrain Rendering", "Stereo", "Parallel Rendering", "Scalibility", "Spatial Data Structure", "Planetary Mission Simulation" ], "authors": [ { "affiliation": "California Institute of Technology, Pasadena, CA", "fullName": "Ansel Teng", "givenName": "Ansel", "surname": "Teng", "__typename": "ArticleAuthorType" }, { "affiliation": "Cray Research Inc, c/o JPL, Pasadena, CA", "fullName": "Scott Whitman", "givenName": "Scott", "surname": "Whitman", "__typename": "ArticleAuthorType" }, { "affiliation": "California Institute of Technology, Pasadena, CA", "fullName": "Meemong Lee", "givenName": "Meemong", "surname": "Lee", "__typename": "ArticleAuthorType" } ], "idPrefix": "sc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "1996-01-01T00:00:00", "pubType": "proceedings", "pages": "8", "year": "1996", "issn": null, "isbn": "0-89791-854-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "26420007", "articleId": "12OmNzYwc0H", "__typename": "AdjacentArticleType" }, "next": { "fno": "26420009", "articleId": 
"12OmNzEmFF9", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/visual/1992/2897/0/00235204", "title": "A scientific visualization renderer", "doi": null, "abstractUrl": "/proceedings-article/visual/1992/00235204/12OmNAkWvbq", "parentPublication": { "id": "proceedings/visual/1992/2897/0", "title": "Proceedings Visualization '92", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pg/2000/0868/0/08680404", "title": "View-Dependent Continuous Level-of-Detail Rendering of Terrain Model", "doi": null, "abstractUrl": "/proceedings-article/pg/2000/08680404/12OmNBUAvZj", "parentPublication": { "id": "proceedings/pg/2000/0868/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/esiat/2009/3682/3/3682c716", "title": "Ringlike Level of Detail in Real-Time Terrain Rendering", "doi": null, "abstractUrl": "/proceedings-article/esiat/2009/3682c716/12OmNBtCCDl", "parentPublication": { "id": "proceedings/esiat/2009/3682/3", "title": "Environmental Science and Information Application Technology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccms/2010/3941/1/3941a198", "title": "Real-Time Rendering for 3D Game Terrain with GPU Optimization", "doi": null, "abstractUrl": "/proceedings-article/iccms/2010/3941a198/12OmNBziBaj", "parentPublication": { "id": "proceedings/iccms/2010/3941/3", "title": "Computer Modeling and Simulation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/supercomputing/1996/854/0/01392885", "title": "STREN: A Highly Scalable Parallel Stereo Terrain Renderer for Planetary Mission Simulations", "doi": null, "abstractUrl": 
"/proceedings-article/supercomputing/1996/01392885/12OmNC8MsyN", "parentPublication": { "id": "proceedings/supercomputing/1996/854/0", "title": "Proceedings of the 1996 ACM/IEEE Conference on Supercomputing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccet/2009/3521/2/3521b003", "title": "Improved Error Metric of Terrain Rendering for Flying High Over the Terrain", "doi": null, "abstractUrl": "/proceedings-article/iccet/2009/3521b003/12OmNrJRP5e", "parentPublication": { "id": "proceedings/iccet/2009/3521/1", "title": "Computer Engineering and Technology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgiv/2009/3789/0/3789a180", "title": "Real-time Rendering System of Large-Scale Terrain in Flight Simulation: Design and Implementation", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2009/3789a180/12OmNxWLTiF", "parentPublication": { "id": "proceedings/cgiv/2009/3789/0", "title": "2009 Sixth International Conference on Computer Graphics, Imaging and Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2000/6478/0/64780041", "title": "Texturing Techniques for Terrain Visualization", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2000/64780041/12OmNzVXNRv", "parentPublication": { "id": "proceedings/ieee-vis/2000/6478/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/1998/01/v0082", "title": "Fast Horizon Computation at All Points of a Terrain With Visibility and Shading Applications", "doi": null, "abstractUrl": "/journal/tg/1998/01/v0082/13rRUx0geuY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tg/2002/04/v0330", "title": "Stereoscopic View-Dependent Visualization of Terrain Height Fields", "doi": null, "abstractUrl": "/journal/tg/2002/04/v0330/13rRUyfKIHy", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNCdk2Yv", "title": "Visualization Conference, IEEE", "acronym": "ieee-vis", "groupId": "1000796", "volume": "0", "displayVolume": "0", "year": "2000", "__typename": "ProceedingType" }, "article": { "id": "12OmNzVXNRv", "doi": "10.1109/VISUAL.2000.885699", "title": "Texturing Techniques for Terrain Visualization", "normalizedTitle": "Texturing Techniques for Terrain Visualization", "abstract": "We present a new rendering technique for processing multiple multiresolution textures of LOD terrain models and describe its application to interactive, animated terrain content design. The approach is based on a multiresolution model for terrain texture which cooperates with a multiresolution model for terrain geometry. For each texture layer, an image pyramid and a texture tree are constructed. Multiple texture layers can be associated with one terrain model and can be combined in different ways, e.g., by blending and masking. The rendering algorithm traverses simultaneously the geometry multiresolution model and the texture multiresolution model, and takes into account geometric and texture approximation errors. It uses multi-pass rendering and exploits multitexturing to achieve real-time performance. Applications include interactive texture lenses, texture animation, and topographic textures. These techniques offer an enormous potential for developing new visualization applications for presenting, exploring and manipulating spatio-temporal data.", "abstracts": [ { "abstractType": "Regular", "content": "We present a new rendering technique for processing multiple multiresolution textures of LOD terrain models and describe its application to interactive, animated terrain content design. The approach is based on a multiresolution model for terrain texture which cooperates with a multiresolution model for terrain geometry. For each texture layer, an image pyramid and a texture tree are constructed. 
Multiple texture layers can be associated with one terrain model and can be combined in different ways, e.g., by blending and masking. The rendering algorithm traverses simultaneously the geometry multiresolution model and the texture multiresolution model, and takes into account geometric and texture approximation errors. It uses multi-pass rendering and exploits multitexturing to achieve real-time performance. Applications include interactive texture lenses, texture animation, and topographic textures. These techniques offer an enormous potential for developing new visualization applications for presenting, exploring and manipulating spatio-temporal data.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a new rendering technique for processing multiple multiresolution textures of LOD terrain models and describe its application to interactive, animated terrain content design. The approach is based on a multiresolution model for terrain texture which cooperates with a multiresolution model for terrain geometry. For each texture layer, an image pyramid and a texture tree are constructed. Multiple texture layers can be associated with one terrain model and can be combined in different ways, e.g., by blending and masking. The rendering algorithm traverses simultaneously the geometry multiresolution model and the texture multiresolution model, and takes into account geometric and texture approximation errors. It uses multi-pass rendering and exploits multitexturing to achieve real-time performance. Applications include interactive texture lenses, texture animation, and topographic textures. 
These techniques offer an enormous potential for developing new visualization applications for presenting, exploring and manipulating spatio-temporal data.", "fno": "64780041", "keywords": [ "Terrain Rendering", "Texture Mapping", "Multiresolution", "Level Of Detail", "3 D Maps" ], "authors": [ { "affiliation": "University of Münster", "fullName": "Jürgen Döllner", "givenName": "Jürgen", "surname": "Döllner", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Münster", "fullName": "Konstantin Baumann", "givenName": "Konstantin", "surname": "Baumann", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Münster", "fullName": "Klaus Hinrichs", "givenName": "Klaus", "surname": "Hinrichs", "__typename": "ArticleAuthorType" } ], "idPrefix": "ieee-vis", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2000-10-01T00:00:00", "pubType": "proceedings", "pages": "41", "year": "2000", "issn": "1070-2385", "isbn": "0-7803-6478-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "64780040", "articleId": "12OmNqG0SYd", "__typename": "AdjacentArticleType" }, "next": { "fno": "64780042", "articleId": "12OmNzBwGnR", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cgi/2000/0643/0/06430325", "title": "Dynamic 3D Maps and Their Texture-Based Design", "doi": null, "abstractUrl": "/proceedings-article/cgi/2000/06430325/12OmNBSBk6b", "parentPublication": { "id": "proceedings/cgi/2000/0643/0", "title": "Computer Graphics International Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgi/1999/0185/0/01850085", "title": "A Hybrid, Hierarchical Data Structure for Real-Time Terrain Visualization", "doi": null, "abstractUrl": "/proceedings-article/cgi/1999/01850085/12OmNCwladD", 
"parentPublication": { "id": "proceedings/cgi/1999/0185/0", "title": "Computer Graphics International Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdip/2009/3565/0/3565a132", "title": "Real-Time Visualization of Virtual Huge Texture", "doi": null, "abstractUrl": "/proceedings-article/icdip/2009/3565a132/12OmNrJiCYH", "parentPublication": { "id": "proceedings/icdip/2009/3565/0", "title": "Digital Image Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/1999/5897/0/58970046", "title": "Geo-Spatial Visualization for Situational Awareness", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1999/58970046/12OmNx5GU1z", "parentPublication": { "id": "proceedings/ieee-vis/1999/5897/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcasia/2004/2138/0/21380280", "title": "VIsualization for HPC Data - Large Terrain Model", "doi": null, "abstractUrl": "/proceedings-article/hpcasia/2004/21380280/12OmNy50g4O", "parentPublication": { "id": "proceedings/hpcasia/2004/2138/0", "title": "High Performance Computing and Grid in Asia Pacific Region, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/1999/5897/0/58970049", "title": "LOD-Sprite Technique for Accelerated Terrain Rendering", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1999/58970049/12OmNynJMIq", "parentPublication": { "id": "proceedings/ieee-vis/1999/5897/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/27660028", "title": "View-Dependent Rendering of Multiresolution Texture-Atlases", "doi": null, "abstractUrl": 
"/proceedings-article/ieee-vis/2005/27660028/12OmNzXWZDD", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2002/7498/0/7498pajarola", "title": "QuadTIN: Quadtree based Triangulated Irregular Networks", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2002/7498pajarola/12OmNzw8jgB", "parentPublication": { "id": "proceedings/ieee-vis/2002/7498/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/1997/02/mcg1997020018", "title": "Multiresolution Textures from Image Sequences", "doi": null, "abstractUrl": "/magazine/cg/1997/02/mcg1997020018/13rRUxN5eyd", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2002/04/v0330", "title": "Stereoscopic View-Dependent Visualization of Terrain Height Fields", "doi": null, "abstractUrl": "/journal/tg/2002/04/v0330/13rRUyfKIHy", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNro0Iam", "title": "Fourth Canadian Conference on Computer and Robot Vision (CRV '07)", "acronym": "crv", "groupId": "1001794", "volume": "0", "displayVolume": "0", "year": "2007", "__typename": "ProceedingType" }, "article": { "id": "12OmNzwHvdp", "doi": "10.1109/CRV.2007.63", "title": "Terrain Modelling for Planetary Exploration", "normalizedTitle": "Terrain Modelling for Planetary Exploration", "abstract": "The success of NASA?s Mars Exploration Rovers has demonstrated the important benefits that mobility adds to planetary exploration. Very soon, mission requirements will impose that planetary exploration rovers drive autonomously in unknown terrain. This will require an evolution of the methods and technologies currently used. This paper presents our approach to 3D terrain reconstruction from large sparse range data sets, and the data reduction achieved through decimation. The outdoor experimental results demonstrate the effectiveness of the reconstructed terrain model for different types of terrain. We also present a first attempt to classify the terrain based on the scans properties.", "abstracts": [ { "abstractType": "Regular", "content": "The success of NASA?s Mars Exploration Rovers has demonstrated the important benefits that mobility adds to planetary exploration. Very soon, mission requirements will impose that planetary exploration rovers drive autonomously in unknown terrain. This will require an evolution of the methods and technologies currently used. This paper presents our approach to 3D terrain reconstruction from large sparse range data sets, and the data reduction achieved through decimation. The outdoor experimental results demonstrate the effectiveness of the reconstructed terrain model for different types of terrain. 
We also present a first attempt to classify the terrain based on the scans properties.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The success of NASA?s Mars Exploration Rovers has demonstrated the important benefits that mobility adds to planetary exploration. Very soon, mission requirements will impose that planetary exploration rovers drive autonomously in unknown terrain. This will require an evolution of the methods and technologies currently used. This paper presents our approach to 3D terrain reconstruction from large sparse range data sets, and the data reduction achieved through decimation. The outdoor experimental results demonstrate the effectiveness of the reconstructed terrain model for different types of terrain. We also present a first attempt to classify the terrain based on the scans properties.", "fno": "27860243", "keywords": [], "authors": [ { "affiliation": "Canadian Space Agency, Space Technologies", "fullName": "Ioannis Rekleitis", "givenName": "Ioannis", "surname": "Rekleitis", "__typename": "ArticleAuthorType" }, { "affiliation": "Canadian Space Agency, Space Technologies", "fullName": "Jean-Luc Bedwani", "givenName": "Jean-Luc", "surname": "Bedwani", "__typename": "ArticleAuthorType" }, { "affiliation": "Canadian Space Agency, Space Technologies", "fullName": "Sebastien Gemme", "givenName": "Sebastien", "surname": "Gemme", "__typename": "ArticleAuthorType" }, { "affiliation": "Canadian Space Agency, Space Technologies", "fullName": "Tom Lamarche", "givenName": "Tom", "surname": "Lamarche", "__typename": "ArticleAuthorType" }, { "affiliation": "Canadian Space Agency, Space Technologies", "fullName": "Erick Dupuis", "givenName": "Erick", "surname": "Dupuis", "__typename": "ArticleAuthorType" } ], "idPrefix": "crv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2007-05-01T00:00:00", "pubType": "proceedings", "pages": "243-249", "year": "2007", "issn": null, "isbn": 
"0-7695-2786-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "04228557", "articleId": "12OmNwDAC8j", "__typename": "AdjacentArticleType" }, "next": { "fno": "27860250", "articleId": "12OmNBPc8Ap", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/supercomputing/1996/854/0/01392885", "title": "STREN: A Highly Scalable Parallel Stereo Terrain Renderer for Planetary Mission Simulations", "doi": null, "abstractUrl": "/proceedings-article/supercomputing/1996/01392885/12OmNC8MsyN", "parentPublication": { "id": "proceedings/supercomputing/1996/854/0", "title": "Proceedings of the 1996 ACM/IEEE Conference on Supercomputing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2001/0948/0/09480183", "title": "Immersive Environment Technologies for Planetary Exploration", "doi": null, "abstractUrl": "/proceedings-article/vr/2001/09480183/12OmNqBtiZv", "parentPublication": { "id": "proceedings/vr/2001/0948/0", "title": "Virtual Reality Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/robot/1991/2163/0/00131935", "title": "An integrated walking system for the Ambler planetary rover", "doi": null, "abstractUrl": "/proceedings-article/robot/1991/00131935/12OmNqzu6Q8", "parentPublication": { "id": "proceedings/robot/1991/2163/0", "title": "Proceedings. 
1991 IEEE International Conference on Robotics and Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/smc-it/2006/2644/0/26440149", "title": "SIMSCAPE Terrain Modeling Toolkit", "doi": null, "abstractUrl": "/proceedings-article/smc-it/2006/26440149/12OmNvAiSuA", "parentPublication": { "id": "proceedings/smc-it/2006/2644/0", "title": "2006 2nd IEEE International Conference on Space Mission Challenges for Information Technology", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aero/2011/7350/0/05747271", "title": "Novel mobility system with active suspension for planetary surface exploration", "doi": null, "abstractUrl": "/proceedings-article/aero/2011/05747271/12OmNwDj11N", "parentPublication": { "id": "proceedings/aero/2011/7350/0", "title": "IEEE Aerospace Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icarsc/2016/2255/0/07781946", "title": "The LRU Rover for Autonomous Planetary Exploration and Its Success in the SpaceBotCamp Challenge", "doi": null, "abstractUrl": "/proceedings-article/icarsc/2016/07781946/12OmNzBOhQE", "parentPublication": { "id": "proceedings/icarsc/2016/2255/0", "title": "2016 International Conference on Autonomous Robot Systems and Competitions (ICARSC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sc/1996/2642/0/26420008", "title": "STREN: A Highly Scalable Parallel Stereo Terrain Renderer for Planetary Mission Simulations", "doi": null, "abstractUrl": "/proceedings-article/sc/1996/26420008/12OmNzBwGyD", "parentPublication": { "id": "proceedings/sc/1996/2642/0", "title": "SC Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/tools/2001/1251/0/12510337", "title": "Jini-Based Mobile Agent Architecture for Human Planetary Exploration", "doi": null, 
"abstractUrl": "/proceedings-article/tools/2001/12510337/12OmNzVoBFT", "parentPublication": { "id": "proceedings/tools/2001/1251/0", "title": "Technology of Object-Oriented Languages, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/1989/06/r6018", "title": "Ambler: An Autonomous Rover for Planetary Exploration", "doi": null, "abstractUrl": "/magazine/co/1989/06/r6018/13rRUwcS1xE", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/smc-it/2021/8560/0/856000a001", "title": "High Performance Computing for Autonomous Planetary Exploration", "doi": null, "abstractUrl": "/proceedings-article/smc-it/2021/856000a001/1ANLfy7Ml32", "parentPublication": { "id": "proceedings/smc-it/2021/8560/0", "title": "2021 IEEE 8th International Conference on Space Mission Challenges for Information Technology (SMC-IT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNCmpcNk", "title": "Visualization Conference, IEEE", "acronym": "ieee-vis", "groupId": "1000796", "volume": "0", "displayVolume": "0", "year": "2005", "__typename": "ProceedingType" }, "article": { "id": "12OmNApu5FI", "doi": "10.1109/VIS.2005.23", "title": "Distributed Data Management for Large Volume Visualization", "normalizedTitle": "Distributed Data Management for Large Volume Visualization", "abstract": "We propose a distributed data management scheme for large data visualization that emphasizes efficient data sharing and access. To minimize data access time and support users with a variety of local computing capabilities, we introduce an adaptive data selection method based on an \"Enhanced Time-Space Partitioning\" (ETSP) tree that assists with effective visibility culling, as well as multiresolution data selection. By traversing the tree, our data management algorithm can quickly identify the visible regions of data, and, for each region, adaptively choose the lowest resolution satisfying userspecified error tolerances. Only necessary data elements are accessed and sent to the visualization pipeline. To further address the issue of sharing large-scale data among geographically distributed collaborative teams, we have designed an infrastructure for integrating our data management technique with a distributed data storage system provided by Logistical Networking (LoN). Data sets at different resolutions are generated and uploaded to LoN for wide-area access. We describe a parallel volume rendering system that verifies the effectiveness of our data storage, selection and access scheme.", "abstracts": [ { "abstractType": "Regular", "content": "We propose a distributed data management scheme for large data visualization that emphasizes efficient data sharing and access. 
To minimize data access time and support users with a variety of local computing capabilities, we introduce an adaptive data selection method based on an \"Enhanced Time-Space Partitioning\" (ETSP) tree that assists with effective visibility culling, as well as multiresolution data selection. By traversing the tree, our data management algorithm can quickly identify the visible regions of data, and, for each region, adaptively choose the lowest resolution satisfying userspecified error tolerances. Only necessary data elements are accessed and sent to the visualization pipeline. To further address the issue of sharing large-scale data among geographically distributed collaborative teams, we have designed an infrastructure for integrating our data management technique with a distributed data storage system provided by Logistical Networking (LoN). Data sets at different resolutions are generated and uploaded to LoN for wide-area access. We describe a parallel volume rendering system that verifies the effectiveness of our data storage, selection and access scheme.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose a distributed data management scheme for large data visualization that emphasizes efficient data sharing and access. To minimize data access time and support users with a variety of local computing capabilities, we introduce an adaptive data selection method based on an \"Enhanced Time-Space Partitioning\" (ETSP) tree that assists with effective visibility culling, as well as multiresolution data selection. By traversing the tree, our data management algorithm can quickly identify the visible regions of data, and, for each region, adaptively choose the lowest resolution satisfying userspecified error tolerances. Only necessary data elements are accessed and sent to the visualization pipeline. 
To further address the issue of sharing large-scale data among geographically distributed collaborative teams, we have designed an infrastructure for integrating our data management technique with a distributed data storage system provided by Logistical Networking (LoN). Data sets at different resolutions are generated and uploaded to LoN for wide-area access. We describe a parallel volume rendering system that verifies the effectiveness of our data storage, selection and access scheme.", "fno": "27660024", "keywords": [ "Large Data Visualization", "Distributed Storage", "Logistical Networking", "Visibility Culling", "Volume Rendering", "Multiresolution Rendering" ], "authors": [ { "affiliation": "Oak Ridge National Lab", "fullName": "Jinzhu Gao", "givenName": "Jinzhu", "surname": "Gao", "__typename": "ArticleAuthorType" }, { "affiliation": "The Univ. of Tennessee", "fullName": "Jian Huang", "givenName": "Jian", "surname": "Huang", "__typename": "ArticleAuthorType" }, { "affiliation": "The Univ. of Tennessee", "fullName": "C. Ryan Johnson", "givenName": "C. Ryan", "surname": "Johnson", "__typename": "ArticleAuthorType" }, { "affiliation": "The Univ. 
of Tennessee", "fullName": "Scott Atchley", "givenName": "Scott", "surname": "Atchley", "__typename": "ArticleAuthorType" }, { "affiliation": "Oak Ridge National Lab", "fullName": "James Arthur Kohl", "givenName": "James Arthur", "surname": "Kohl", "__typename": "ArticleAuthorType" } ], "idPrefix": "ieee-vis", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2005-10-01T00:00:00", "pubType": "proceedings", "pages": "24", "year": "2005", "issn": null, "isbn": "0-7803-9462-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "01532809", "articleId": "12OmNAlNiKF", "__typename": "AdjacentArticleType" }, "next": { "fno": "01532810", "articleId": "12OmNwFzO0f", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ieee-vis/2004/8788/0/87880147", "title": "Visibility Culling for Time-Varying Volume Rendering Using Temporal Occlusion Coherence", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2004/87880147/12OmNAY79mS", "parentPublication": { "id": "proceedings/ieee-vis/2004/8788/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2003/2030/0/20300045", "title": "Visibility Culling Using Plenoptic Opacity Functions for Large Volume Visualization", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2003/20300045/12OmNBhZ4fE", "parentPublication": { "id": "proceedings/ieee-vis/2003/2030/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sccompanion/2012/4956/0/4956b479", "title": "Abstract: Remote Visualization for Large-Scale Simulation Using Particle-Based Volume Rendering", "doi": null, "abstractUrl": 
"/proceedings-article/sccompanion/2012/4956b479/12OmNxGSmft", "parentPublication": { "id": "proceedings/sccompanion/2012/4956/0", "title": "2012 SC Companion: High Performance Computing, Networking Storage and Analysis", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wcse/2009/3570/3/3570c276", "title": "The Research of Large-Scale 3D Scenes Rendering Optimization", "doi": null, "abstractUrl": "/proceedings-article/wcse/2009/3570c276/12OmNy3iFfZ", "parentPublication": { "id": "proceedings/wcse/2009/3570/2", "title": "2009 WRI World Congress on Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pvg/2001/7223/0/72230093", "title": "Multiresolution View-Dependent Splat Based Volume Rendering of Large Irregular Data", "doi": null, "abstractUrl": "/proceedings-article/pvg/2001/72230093/12OmNya72oP", "parentPublication": { "id": "proceedings/pvg/2001/7223/0", "title": "Parallel and Large-Data Visualization and Graphics, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/01532794", "title": "Distributed data management for large volume visualization", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/01532794/12OmNzWOB7p", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/1999/5897/0/58970060", "title": "Multiresolution Techniques for Interactive Texture-based Volume Visualization", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1999/58970060/12OmNzw8j09", "parentPublication": { "id": "proceedings/ieee-vis/1999/5897/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tg/1997/04/v0352", "title": "Multiresolution Representation and Visualization of Volume Data", "doi": null, "abstractUrl": "/journal/tg/1997/04/v0352/13rRUNvgzis", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2003/02/mcg2003020022", "title": "Enabling View-Dependent Progressive Volume Visualization on the Grid", "doi": null, "abstractUrl": "/magazine/cg/2003/02/mcg2003020022/13rRUwhpBSv", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/12/ttg2011122135", "title": "Interactive Multiscale Tensor Reconstruction for Multiresolution Volume Visualization", "doi": null, "abstractUrl": "/journal/tg/2011/12/ttg2011122135/13rRUyeCkae", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxWuirg", "title": "Visualization Conference, IEEE", "acronym": "ieee-vis", "groupId": "1000796", "volume": "0", "displayVolume": "0", "year": "2004", "__typename": "ProceedingType" }, "article": { "id": "12OmNvD8Rxs", "doi": "10.1109/VISUAL.2004.95", "title": "Scout: A Hardware-Accelerated System for Quantitatively Driven Visualization and Analysis", "normalizedTitle": "Scout: A Hardware-Accelerated System for Quantitatively Driven Visualization and Analysis", "abstract": "Quantitative techniques for visualization are critical to the successful analysis of both acquired and simulated scientific data. Many visualization techniques rely on indirect mappings, such as transfer functions, to produce the final imagery. In many situations, it is preferable and more powerful to express these mappings as mathematical expressions, or queries, that can then be directly applied to the data. In this paper, we present a hardware-accelerated system that provides such capabilities and exploits current graphics hardware for portions of the computational tasks that would otherwise be executed on the CPU. In our approach, the direct programming of the graphics processor using a concise data parallel language, gives scientists the capability to efficiently explore and visualize data sets.", "abstracts": [ { "abstractType": "Regular", "content": "Quantitative techniques for visualization are critical to the successful analysis of both acquired and simulated scientific data. Many visualization techniques rely on indirect mappings, such as transfer functions, to produce the final imagery. In many situations, it is preferable and more powerful to express these mappings as mathematical expressions, or queries, that can then be directly applied to the data. 
In this paper, we present a hardware-accelerated system that provides such capabilities and exploits current graphics hardware for portions of the computational tasks that would otherwise be executed on the CPU. In our approach, the direct programming of the graphics processor using a concise data parallel language, gives scientists the capability to efficiently explore and visualize data sets.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Quantitative techniques for visualization are critical to the successful analysis of both acquired and simulated scientific data. Many visualization techniques rely on indirect mappings, such as transfer functions, to produce the final imagery. In many situations, it is preferable and more powerful to express these mappings as mathematical expressions, or queries, that can then be directly applied to the data. In this paper, we present a hardware-accelerated system that provides such capabilities and exploits current graphics hardware for portions of the computational tasks that would otherwise be executed on the CPU. In our approach, the direct programming of the graphics processor using a concise data parallel language, gives scientists the capability to efficiently explore and visualize data sets.", "fno": "87880171", "keywords": [ "Visualization Systems", "Hardware Acceleration", "Multivariate Visualization", "Volume Rendering" ], "authors": [ { "affiliation": "Los Alamos National Laboratory", "fullName": "Patrick S. McCormick", "givenName": "Patrick S.", "surname": "McCormick", "__typename": "ArticleAuthorType" }, { "affiliation": "Los Alamos National Laboratory", "fullName": "Jeff Inman", "givenName": "Jeff", "surname": "Inman", "__typename": "ArticleAuthorType" }, { "affiliation": "Los Alamos National Laboratory", "fullName": "James P. 
Ahrens", "givenName": "James P.", "surname": "Ahrens", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Utah", "fullName": "Charles Hansen", "givenName": "Charles", "surname": "Hansen", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Utah", "fullName": "Greg Roth", "givenName": "Greg", "surname": "Roth", "__typename": "ArticleAuthorType" } ], "idPrefix": "ieee-vis", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2004-10-01T00:00:00", "pubType": "proceedings", "pages": "171-178", "year": "2004", "issn": null, "isbn": "0-7803-8788-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "87880163", "articleId": "12OmNwnYFZg", "__typename": "AdjacentArticleType" }, "next": { "fno": "87880179", "articleId": "12OmNyfdOQW", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ieee-vis/1999/5897/0/58970051", "title": "Accelerating 3D Convolution using Graphics Hardware", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1999/58970051/12OmNA0dMPO", "parentPublication": { "id": "proceedings/ieee-vis/1999/5897/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pg/2002/1784/0/17840394", "title": "Visualization of Multidimensional, Multivariate Volume Data Using Hardware-Accelerated Non-Photorealistic Rendering Techniques", "doi": null, "abstractUrl": "/proceedings-article/pg/2002/17840394/12OmNB9KHwC", "parentPublication": { "id": "proceedings/pg/2002/1784/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ivapp/2014/8132/0/07294450", "title": "Hardware-accelerated attribute mapping for interactive visualization of 
complex 3D trajectories", "doi": null, "abstractUrl": "/proceedings-article/ivapp/2014/07294450/12OmNBdru8w", "parentPublication": { "id": "proceedings/ivapp/2014/8132/0", "title": "2014 International Conference on Information Visualization Theory and Applications (IVAPP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/27660044", "title": "Hardware-Accelerated Simulated Radiography", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/27660044/12OmNvUaNmU", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2000/6478/0/64780021", "title": "Combining Local and Remote Visualization Techniques for Interactive Volume Rendering in Medical Applications", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2000/64780021/12OmNyrqzty", "parentPublication": { "id": "proceedings/ieee-vis/2000/6478/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2004/8788/0/87880067", "title": "Hardware-Accelerated Adaptive EWA Volume Splatting", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2004/87880067/12OmNzX6cvd", "parentPublication": { "id": "proceedings/ieee-vis/2004/8788/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2009/05/ttg2009050734", "title": "Distribution-Driven Visualization of Volume Data", "doi": null, "abstractUrl": "/journal/tg/2009/05/ttg2009050734/13rRUNvgyWi", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2000/04/v0335", "title": "An Order of 
Magnitude Faster Isosurface Rendering in Software on a PC than Using Dedicated, General Purpose Rendering Hardware", "doi": null, "abstractUrl": "/journal/tg/2000/04/v0335/13rRUxBJhFk", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2003/02/v0163", "title": "Hardware-Based View-Independent Cell Projection", "doi": null, "abstractUrl": "/journal/tg/2003/02/v0163/13rRUxBa5x2", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2006/05/v1061", "title": "Hub-based Simulation and Graphics Hardware Accelerated Visualization for Nanotechnology Applications", "doi": null, "abstractUrl": "/journal/tg/2006/05/v1061/13rRUyfKIHD", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNvmowTe", "title": "2012 SC Companion: High Performance Computing, Networking Storage and Analysis", "acronym": "sccompanion", "groupId": "1802397", "volume": "0", "displayVolume": "0", "year": "2012", "__typename": "ProceedingType" }, "article": { "id": "12OmNxGSmft", "doi": "10.1109/SC.Companion.2012.273", "title": "Abstract: Remote Visualization for Large-Scale Simulation Using Particle-Based Volume Rendering", "normalizedTitle": "Abstract: Remote Visualization for Large-Scale Simulation Using Particle-Based Volume Rendering", "abstract": "With the recent development of supercomputers, it is required to efficiently visualize the results of extremely large-scale numerical simulations with a few hundreds to several tens of thousands of parallel processes. Conventional offline-processing visualization approaches are facing difficulties such as transferring large-scale data and reassembly of extensive amount of computational result files, which are inevitable for sort-first or sort-last visualization methods [1]. On the other hand interactive visualization on a supercomputer is still limited. We propose a remote visualization system which has three features. First, our visualization system can avoid the reassembly of the result files. This system can generate rendering primitives on the same number of parallel processes of the numerical simulation. Second, this system enables users to interactively manipulate camera position and time progress. Third, this system is appropriate to overview entire physical values of computational space (volume data) by volume rendering.", "abstracts": [ { "abstractType": "Regular", "content": "With the recent development of supercomputers, it is required to efficiently visualize the results of extremely large-scale numerical simulations with a few hundreds to several tens of thousands of parallel processes. 
Conventional offline-processing visualization approaches are facing difficulties such as transferring large-scale data and reassembly of extensive amount of computational result files, which are inevitable for sort-first or sort-last visualization methods [1]. On the other hand interactive visualization on a supercomputer is still limited. We propose a remote visualization system which has three features. First, our visualization system can avoid the reassembly of the result files. This system can generate rendering primitives on the same number of parallel processes of the numerical simulation. Second, this system enables users to interactively manipulate camera position and time progress. Third, this system is appropriate to overview entire physical values of computational space (volume data) by volume rendering.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "With the recent development of supercomputers, it is required to efficiently visualize the results of extremely large-scale numerical simulations with a few hundreds to several tens of thousands of parallel processes. Conventional offline-processing visualization approaches are facing difficulties such as transferring large-scale data and reassembly of extensive amount of computational result files, which are inevitable for sort-first or sort-last visualization methods [1]. On the other hand interactive visualization on a supercomputer is still limited. We propose a remote visualization system which has three features. First, our visualization system can avoid the reassembly of the result files. This system can generate rendering primitives on the same number of parallel processes of the numerical simulation. Second, this system enables users to interactively manipulate camera position and time progress. 
Third, this system is appropriate to overview entire physical values of computational space (volume data) by volume rendering.", "fno": "4956b479", "keywords": [ "Volume Rendering", "Visualization" ], "authors": [ { "affiliation": "Center for Comput. Sci. & E-Syst., Japan Atomic Energy Agency, Kashiwa, Japan", "fullName": "T. Kawamura", "givenName": "T.", "surname": "Kawamura", "__typename": "ArticleAuthorType" }, { "affiliation": "Center for Comput. Sci. & E-Syst., Japan Atomic Energy Agency, Kashiwa, Japan", "fullName": "Y. Idomura", "givenName": "Y.", "surname": "Idomura", "__typename": "ArticleAuthorType" }, { "affiliation": "Center for Comput. Sci. & E-Syst., Japan Atomic Energy Agency, Kashiwa, Japan", "fullName": "H. Miyamura", "givenName": "H.", "surname": "Miyamura", "__typename": "ArticleAuthorType" }, { "affiliation": "Center for Comput. Sci. & E-Syst., Japan Atomic Energy Agency, Kashiwa, Japan", "fullName": "H. Takemiya", "givenName": "H.", "surname": "Takemiya", "__typename": "ArticleAuthorType" } ], "idPrefix": "sccompanion", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2012-11-01T00:00:00", "pubType": "proceedings", "pages": "1479-1480", "year": "2012", "issn": null, "isbn": "978-1-4673-3049-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4956b478", "articleId": "12OmNwwMf14", "__typename": "AdjacentArticleType" }, "next": { "fno": "4956b481", "articleId": "12OmNAoUTbm", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/scc/2012/6218/0/06496057", "title": "Poster: Remote Visualization for Large-Scale Simulation Using Particle-Based Volume Rendering", "doi": null, "abstractUrl": "/proceedings-article/scc/2012/06496057/12OmNAlNiKK", "parentPublication": { "id": "proceedings/scc/2012/6218/0", "title": "2012 SC Companion: High Performance 
Computing, Networking, Storage and Analysis (SCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sccompanion/2012/4956/0/4956b481", "title": "Poster: Remote Visualization for Large-Scale Simulation Using Particle-Based Volume Rendering", "doi": null, "abstractUrl": "/proceedings-article/sccompanion/2012/4956b481/12OmNAoUTbm", "parentPublication": { "id": "proceedings/sccompanion/2012/4956/0", "title": "2012 SC Companion: High Performance Computing, Networking Storage and Analysis", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pvg/2003/2091/0/20910006", "title": "SLIC: Scheduled Linear Image Compositing for Parallel Volume Rendering", "doi": null, "abstractUrl": "/proceedings-article/pvg/2003/20910006/12OmNCdBDUq", "parentPublication": { "id": "proceedings/pvg/2003/2091/0", "title": "Parallel and Large-Data Visualization and Graphics, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/27660038", "title": "Scale-Invariant Volume Rendering", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/27660038/12OmNxb5hu0", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/uksim/2008/3114/0/3114a372", "title": "A Particle Modeling for Rendering Irregular Volumes", "doi": null, "abstractUrl": "/proceedings-article/uksim/2008/3114a372/12OmNyRg4uB", "parentPublication": { "id": "proceedings/uksim/2008/3114/0", "title": "Computer Modeling and Simulation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/scc/2012/6218/0/06496056", "title": "Abstract: Remote Visualization for Large-Scale Simulation Using Particle-Based Volume Rendering", 
"doi": null, "abstractUrl": "/proceedings-article/scc/2012/06496056/12OmNzaQohT", "parentPublication": { "id": "proceedings/scc/2012/6218/0", "title": "2012 SC Companion: High Performance Computing, Networking, Storage and Analysis (SCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/08/ttg2011081164", "title": "Sort-First Parallel Volume Rendering", "doi": null, "abstractUrl": "/journal/tg/2011/08/ttg2011081164/13rRUxAATgt", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2001/03/v0242", "title": "Two-Level Volume Rendering", "doi": null, "abstractUrl": "/journal/tg/2001/03/v0242/13rRUxC0SOO", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2005/03/v0285", "title": "Hardware-Assisted Visibility Sorting for Unstructured Volume Rendering", "doi": null, "abstractUrl": "/journal/tg/2005/03/v0285/13rRUxOdD89", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/12/ttg2012122335", "title": "Fuzzy Volume Rendering", "doi": null, "abstractUrl": "/journal/tg/2012/12/ttg2012122335/13rRUyeTVi0", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNylsZKi", "title": "Visualization Conference, IEEE", "acronym": "ieee-vis", "groupId": "1000796", "volume": "0", "displayVolume": "0", "year": "1998", "__typename": "ProceedingType" }, "article": { "id": "12OmNy5hRlW", "doi": "10.1109/VISUAL.1998.745319", "title": "Image-Based Transfer Function Design for Data Exploration in Volume Visualization", "normalizedTitle": "Image-Based Transfer Function Design for Data Exploration in Volume Visualization", "abstract": "Transfer function design is an integrated component in volume visualization and data exploration. The common trial-and-error approach for transfer function searching is a very difficult and time-consuming process. A goal-oriented and parameterized transfer function model is, therefore, crucial in guiding the transfer function searching process for better and more meaningful visualization results. This paper presents an image-based transfer function model that integrates 3D image processing tools into the volume visualization pipeline to facilitate the search for an image-based transfer function in volume data visualization and exploration. The model defines a transfer function as a sequence of 3D image processing procedures, and allows the users to adjust a set of qualitative and descriptive parameters to achieve their subjective visualization goals. 3D image enhancement and boundary detection tools, and their integration methods with volume visualization algorithms are described in this paper. The application of this approach for 3D microscopy data exploration and analysis is also discussed.", "abstracts": [ { "abstractType": "Regular", "content": "Transfer function design is an integrated component in volume visualization and data exploration. The common trial-and-error approach for transfer function searching is a very difficult and time-consuming process. 
A goal-oriented and parameterized transfer function model is, therefore, crucial in guiding the transfer function searching process for better and more meaningful visualization results. This paper presents an image-based transfer function model that integrates 3D image processing tools into the volume visualization pipeline to facilitate the search for an image-based transfer function in volume data visualization and exploration. The model defines a transfer function as a sequence of 3D image processing procedures, and allows the users to adjust a set of qualitative and descriptive parameters to achieve their subjective visualization goals. 3D image enhancement and boundary detection tools, and their integration methods with volume visualization algorithms are described in this paper. The application of this approach for 3D microscopy data exploration and analysis is also discussed.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Transfer function design is an integrated component in volume visualization and data exploration. The common trial-and-error approach for transfer function searching is a very difficult and time-consuming process. A goal-oriented and parameterized transfer function model is, therefore, crucial in guiding the transfer function searching process for better and more meaningful visualization results. This paper presents an image-based transfer function model that integrates 3D image processing tools into the volume visualization pipeline to facilitate the search for an image-based transfer function in volume data visualization and exploration. The model defines a transfer function as a sequence of 3D image processing procedures, and allows the users to adjust a set of qualitative and descriptive parameters to achieve their subjective visualization goals. 3D image enhancement and boundary detection tools, and their integration methods with volume visualization algorithms are described in this paper. 
The application of this approach for 3D microscopy data exploration and analysis is also discussed.", "fno": "91760319", "keywords": [ "Volume Visualization", "3 D Image Processing", "Transfer Function", "Volume Rendering", "Data Exploration" ], "authors": [ { "affiliation": "Indiana University Purdue University Indianapolis", "fullName": "Shiaofen Fang", "givenName": "Shiaofen", "surname": "Fang", "__typename": "ArticleAuthorType" }, { "affiliation": "Indiana University Purdue University Indianapolis", "fullName": "Tom Biddlecome", "givenName": "Tom", "surname": "Biddlecome", "__typename": "ArticleAuthorType" }, { "affiliation": "Indiana University Purdue University Indianapolis", "fullName": "Mihran Tuceryan", "givenName": "Mihran", "surname": "Tuceryan", "__typename": "ArticleAuthorType" } ], "idPrefix": "ieee-vis", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "1998-10-01T00:00:00", "pubType": "proceedings", "pages": "319", "year": "1998", "issn": null, "isbn": "0-8186-9176-x", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "91760313", "articleId": "12OmNA1mbbe", "__typename": "AdjacentArticleType" }, "next": { "fno": "91760327", "articleId": "12OmNwDj1ew", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ieee-vis/1999/5897/0/58970038", "title": "Interactive Exploration of Volume Line Integral Convolution Based on 3D-Texture Mapping", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1999/58970038/12OmNCdk2MV", "parentPublication": { "id": "proceedings/ieee-vis/1999/5897/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/1999/5897/0/58970050", "title": "Automating Transfer Function Design for Comprehensible Volume Rendering Based on 3D Field 
Topology Analysis", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1999/58970050/12OmNvkpkRn", "parentPublication": { "id": "proceedings/ieee-vis/1999/5897/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2003/2030/0/20300064", "title": "Adaptive Design of a Global Opacity Transfer Function for Direct Volume Rendering of Ultrasound Data", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2003/20300064/12OmNwfKj94", "parentPublication": { "id": "proceedings/ieee-vis/2003/2030/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/kse/2011/4567/0/4567a054", "title": "Volume Visualization and Exploration Based on Semi-automatic Multidimensional Transfer Function Design", "doi": null, "abstractUrl": "/proceedings-article/kse/2011/4567a054/12OmNx7G5W4", "parentPublication": { "id": "proceedings/kse/2011/4567/0", "title": "Knowledge and Systems Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2003/2030/0/20300065", "title": "Gaussian Transfer Functions for Multi-Field Volume Visualization", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2003/20300065/12OmNy6HQVj", "parentPublication": { "id": "proceedings/ieee-vis/2003/2030/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2006/05/v1021", "title": "High-Level User Interfaces for Transfer Function Design with Semantics", "doi": null, "abstractUrl": "/journal/tg/2006/05/v1021/13rRUNvyat9", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"mags/cs/2008/06/mcs2008060082", "title": "Transfer-Function Specification for Rendering Disparate Volumes", "doi": null, "abstractUrl": "/magazine/cs/2008/06/mcs2008060082/13rRUwIF64Q", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/06/ttg2008061380", "title": "Size-based Transfer Functions: A New Volume Exploration Technique", "doi": null, "abstractUrl": "/journal/tg/2008/06/ttg2008061380/13rRUwIF6l1", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2009/06/ttg2009061473", "title": "Structuring Feature Space: A Non-Parametric Method for Volumetric Transfer Function Generation", "doi": null, "abstractUrl": "/journal/tg/2009/06/ttg2009061473/13rRUwd9CLG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/02/ttg2011020171", "title": "Feature-Preserving Volume Data Reduction and Focus+Context Visualization", "doi": null, "abstractUrl": "/journal/tg/2011/02/ttg2011020171/13rRUx0xPTN", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxwWorv", "title": "Visualisation, International Conference in", "acronym": "viz", "groupId": "1001944", "volume": "0", "displayVolume": "0", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNzt0IxY", "doi": "10.1109/VIZ.2009.30", "title": "3D Visualization of Medical Image Data Employing 2D Histograms", "normalizedTitle": "3D Visualization of Medical Image Data Employing 2D Histograms", "abstract": "Transfer functions (TF) are a means for improving the visualization of 3D medical image data. If in addition to intensity another property is employed, two-dimensional TFs can be specified. For this, 2D histograms are helpful. In this work we investigate how the property feature size can be used for the definition of 2D TFs and the visualization of medical image data. Furthermore, we compare this method to approaches that employ gradient magnitude as second property. From our experiments with several medical image data we conclude, that structure size enhanced 2D histograms are more intuitive. This is especially true in the clinical area, where physicians are much more familiar with the meaning of the size of anatomical structures than with the concept of gradient magnitude.", "abstracts": [ { "abstractType": "Regular", "content": "Transfer functions (TF) are a means for improving the visualization of 3D medical image data. If in addition to intensity another property is employed, two-dimensional TFs can be specified. For this, 2D histograms are helpful. In this work we investigate how the property feature size can be used for the definition of 2D TFs and the visualization of medical image data. Furthermore, we compare this method to approaches that employ gradient magnitude as second property. From our experiments with several medical image data we conclude, that structure size enhanced 2D histograms are more intuitive. 
This is especially true in the clinical area, where physicians are much more familiar with the meaning of the size of anatomical structures than with the concept of gradient magnitude.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Transfer functions (TF) are a means for improving the visualization of 3D medical image data. If in addition to intensity another property is employed, two-dimensional TFs can be specified. For this, 2D histograms are helpful. In this work we investigate how the property feature size can be used for the definition of 2D TFs and the visualization of medical image data. Furthermore, we compare this method to approaches that employ gradient magnitude as second property. From our experiments with several medical image data we conclude, that structure size enhanced 2D histograms are more intuitive. This is especially true in the clinical area, where physicians are much more familiar with the meaning of the size of anatomical structures than with the concept of gradient magnitude.", "fno": "3734a153", "keywords": [ "Volume Rendering", "Transfer Functions", "Histogram", "Feature Size" ], "authors": [ { "affiliation": null, "fullName": "Stefan Wesarg", "givenName": "Stefan", "surname": "Wesarg", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Matthias Kirschner", "givenName": "Matthias", "surname": "Kirschner", "__typename": "ArticleAuthorType" } ], "idPrefix": "viz", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-07-01T00:00:00", "pubType": "proceedings", "pages": "153-158", "year": "2009", "issn": null, "isbn": "978-0-7695-3734-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3734a138", "articleId": "12OmNzZWbK2", "__typename": "AdjacentArticleType" }, "next": { "fno": "3734a159", "articleId": "12OmNqJq4u8", "__typename": "AdjacentArticleType" }, "__typename": 
"AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/csnt/2012/4692/0/4692a149", "title": "3D Modeling and Rendering of 2D Medical Image", "doi": null, "abstractUrl": "/proceedings-article/csnt/2012/4692a149/12OmNAYXWxT", "parentPublication": { "id": "proceedings/csnt/2012/4692/0", "title": "Communication Systems and Network Technologies, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109c540", "title": "A Spot Segmentation Approach for 2D Gel Electrophoresis Images Based on 2D Histograms", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109c540/12OmNBIWXCo", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dicta/2009/3866/0/3866a047", "title": "Texture Description in Local Scale Using Texton Histograms with Universal Dictionary", "doi": null, "abstractUrl": "/proceedings-article/dicta/2009/3866a047/12OmNqGA5b2", "parentPublication": { "id": "proceedings/dicta/2009/3866/0", "title": "2009 Digital Image Computing: Techniques and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dagstuhl/1997/0503/0/05030233", "title": "InVIS - Interactive Visualization of Medical Data Sets", "doi": null, "abstractUrl": "/proceedings-article/dagstuhl/1997/05030233/12OmNy7h3aK", "parentPublication": { "id": "proceedings/dagstuhl/1997/0503/0", "title": "Dagstuhl '97 - Scientific Visualization Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2000/6478/0/64780021", "title": "Combining Local and Remote Visualization Techniques for Interactive Volume Rendering in Medical Applications", "doi": null, "abstractUrl": 
"/proceedings-article/ieee-vis/2000/64780021/12OmNyrqzty", "parentPublication": { "id": "proceedings/ieee-vis/2000/6478/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2009/03/ttg2009030395", "title": "Semiautomatic Transfer Function Initialization for Abdominal Visualization Using Self-Generating Hierarchical Radial Basis Function Networks", "doi": null, "abstractUrl": "/journal/tg/2009/03/ttg2009030395/13rRUwIF6dI", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2006/02/v0208", "title": "Visualization of Boundaries in Volumetric Data Sets Using LH Histograms", "doi": null, "abstractUrl": "/journal/tg/2006/02/v0208/13rRUwbs2aT", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/02/ttg2011020192", "title": "Visibility Histograms and Visibility-Driven Transfer Functions", "doi": null, "abstractUrl": "/journal/tg/2011/02/ttg2011020192/13rRUwwaKt3", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/12/ttg2012122345", "title": "Automatic Tuning of Spatially Varying Transfer Functions for Blood Vessel Visualization", "doi": null, "abstractUrl": "/journal/tg/2012/12/ttg2012122345/13rRUx0xPIE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2005/03/mcg2005030031", "title": "Illustration Motifs for Effective Medical Volume Illustration", "doi": null, 
"abstractUrl": "/magazine/cg/2005/03/mcg2005030031/13rRUyft7x2", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNvAiSp8", "title": "Education Technology and Computer Science, International Workshop on", "acronym": "etcs", "groupId": "1002740", "volume": "1", "displayVolume": "1", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNAq3hNt", "doi": "10.1109/ETCS.2009.24", "title": "Design of a Prototype for Augmented Reality Defective Bone Repair Simulation System", "normalizedTitle": "Design of a Prototype for Augmented Reality Defective Bone Repair Simulation System", "abstract": "Combining the exiting bone scaffold design system and augmented reality interactive technology, this paper presents an aided planning and test interactive system frame for defective bone repair surgery, and develops the prototype system based on it. The system allows the clinician to carry out the surgical planning and simulation directly using the virtual model, after designing the CAD model of defective bone scaffold. The results, which can be used to validate and modify the bone scaffold model.", "abstracts": [ { "abstractType": "Regular", "content": "Combining the exiting bone scaffold design system and augmented reality interactive technology, this paper presents an aided planning and test interactive system frame for defective bone repair surgery, and develops the prototype system based on it. The system allows the clinician to carry out the surgical planning and simulation directly using the virtual model, after designing the CAD model of defective bone scaffold. The results, which can be used to validate and modify the bone scaffold model.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Combining the exiting bone scaffold design system and augmented reality interactive technology, this paper presents an aided planning and test interactive system frame for defective bone repair surgery, and develops the prototype system based on it. 
The system allows the clinician to carry out the surgical planning and simulation directly using the virtual model, after designing the CAD model of defective bone scaffold. The results, which can be used to validate and modify the bone scaffold model.", "fno": "3557a066", "keywords": [ "Augmented Reality", "Bone Scaffold Design", "Defective Bone Repair", "Visualization Technology", "Human Machine Interaction" ], "authors": [ { "affiliation": null, "fullName": "Yuan Yao", "givenName": "Yuan", "surname": "Yao", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Xiu-xiang Pang", "givenName": "Xiu-xiang", "surname": "Pang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Tao Liu", "givenName": "Tao", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Qing-xi Hu", "givenName": "Qing-xi", "surname": "Hu", "__typename": "ArticleAuthorType" } ], "idPrefix": "etcs", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-03-01T00:00:00", "pubType": "proceedings", "pages": "66-70", "year": "2009", "issn": null, "isbn": "978-0-7695-3557-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3557a062", "articleId": "12OmNqzLHQg", "__typename": "AdjacentArticleType" }, "next": { "fno": "3557a071", "articleId": "12OmNxWuisn", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2008/1971/0/04480747", "title": "An Empirical Study of Hear-Through Augmented Reality: Using Bone Conduction to Deliver Spatialized Audio", "doi": null, "abstractUrl": "/proceedings-article/vr/2008/04480747/12OmNB8kHRs", "parentPublication": { "id": "proceedings/vr/2008/1971/0", "title": "IEEE Virtual Reality 2008", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/isar/2001/1375/0/13750197", "title": "Augmented Reality as a New Media Experience", "doi": null, "abstractUrl": "/proceedings-article/isar/2001/13750197/12OmNy1SFK8", "parentPublication": { "id": "proceedings/isar/2001/1375/0", "title": "Proceedings IEEE and ACM International Symposium on Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ca/1997/7984/0/79840031", "title": "Interaction between Real and Virtual Humans in Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ca/1997/79840031/12OmNyKJijf", "parentPublication": { "id": "proceedings/ca/1997/7984/0", "title": "Computer Animation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2004/2177/0/21770761", "title": "Augmented Reality Interface Toolkit", "doi": null, "abstractUrl": "/proceedings-article/iv/2004/21770761/12OmNyUnELp", "parentPublication": { "id": "proceedings/iv/2004/2177/0", "title": "Proceedings. Eighth International Conference on Information Visualisation, 2004. 
IV 2004.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2009/3791/0/3791a013", "title": "Robot Programming Using Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/cw/2009/3791a013/12OmNz5JBSP", "parentPublication": { "id": "proceedings/cw/2009/3791/0", "title": "2009 International Conference on CyberWorlds", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csie/2009/3507/2/3507b559", "title": "Study on Generation of Macro-pores of Bionic Bone Scaffold Based on Knapsack Problem Model", "doi": null, "abstractUrl": "/proceedings-article/csie/2009/3507b559/12OmNzVGcNU", "parentPublication": { "id": "proceedings/csie/2009/3507/2", "title": "Computer Science and Information Engineering, World Congress on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2012/04/tth2012040344", "title": "Impulse-Based Rendering Methods for Haptic Simulation of Bone-Burring", "doi": null, "abstractUrl": "/journal/th/2012/04/tth2012040344/13rRUwhHcQZ", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/2012/07/mco2012070032", "title": "Projection-Based Augmented Reality in Disney Theme Parks", "doi": null, "abstractUrl": "/magazine/co/2012/07/mco2012070032/13rRUyoyhJq", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibe/2022/8487/0/848700a180", "title": "The Preparation of High Performance Gelatin/Hyaluronic Acid Sponge Bone Scaffold", "doi": null, "abstractUrl": "/proceedings-article/bibe/2022/848700a180/1J6hAeK489O", "parentPublication": { "id": "proceedings/bibe/2022/8487/0", "title": "2022 IEEE 22nd International Conference on Bioinformatics and Bioengineering (BIBE)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibe/2022/8487/0/848700a142", "title": "3D printing of bioceramic/polycaprolactone composite scaffolds for bone tissue engineering", "doi": null, "abstractUrl": "/proceedings-article/bibe/2022/848700a142/1J6hCI5IlWM", "parentPublication": { "id": "proceedings/bibe/2022/8487/0", "title": "2022 IEEE 22nd International Conference on Bioinformatics and Bioengineering (BIBE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNCbCrVD", "title": "Visualization Conference, IEEE", "acronym": "ieee-vis", "groupId": "1000796", "volume": "0", "displayVolume": "0", "year": "1995", "__typename": "ProceedingType" }, "article": { "id": "12OmNAsTgR0", "doi": "10.1109/VISUAL.1995.480791", "title": "Interactive Realism for Visualization Using Ray Tracing", "normalizedTitle": "Interactive Realism for Visualization Using Ray Tracing", "abstract": "Visual realism is necessary for many virtual reality applications. In order to convince the user that the virtual environment is real, the scene presented should faithfully model the expected actual environment. A highly accurate, fully modeled, interactive environment is thus seen as \"virtually real.\"This paper addresses the problem of interactive visual realism and discusses a possible solution: a hybrid rendering paradigm that ties distributed graphics hardware and ray tracing systems together for use in interactive, high visual realism applications.This new paradigm is examined in the context of a working rendering system. This system is capable of producing images of higher fidelity than possible through the use of graphics hardware alone, able both to render images at speeds useful for interactive systems and to progressively refine static, high quality snapshots.", "abstracts": [ { "abstractType": "Regular", "content": "Visual realism is necessary for many virtual reality applications. In order to convince the user that the virtual environment is real, the scene presented should faithfully model the expected actual environment. 
A highly accurate, fully modeled, interactive environment is thus seen as \"virtually real.\"This paper addresses the problem of interactive visual realism and discusses a possible solution: a hybrid rendering paradigm that ties distributed graphics hardware and ray tracing systems together for use in interactive, high visual realism applications.This new paradigm is examined in the context of a working rendering system. This system is capable of producing images of higher fidelity than possible through the use of graphics hardware alone, able both to render images at speeds useful for interactive systems and to progressively refine static, high quality snapshots.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Visual realism is necessary for many virtual reality applications. In order to convince the user that the virtual environment is real, the scene presented should faithfully model the expected actual environment. A highly accurate, fully modeled, interactive environment is thus seen as \"virtually real.\"This paper addresses the problem of interactive visual realism and discusses a possible solution: a hybrid rendering paradigm that ties distributed graphics hardware and ray tracing systems together for use in interactive, high visual realism applications.This new paradigm is examined in the context of a working rendering system. This system is capable of producing images of higher fidelity than possible through the use of graphics hardware alone, able both to render images at speeds useful for interactive systems and to progressively refine static, high quality snapshots.", "fno": "71870019", "keywords": [ "Ray Tracing", "Visual Realism", "Virtual Reality", "Distributed Rendering" ], "authors": [ { "affiliation": "Naval Research Laboratory", "fullName": "Robert A. 
Cross", "givenName": "Robert A.", "surname": "Cross", "__typename": "ArticleAuthorType" } ], "idPrefix": "ieee-vis", "isOpenAccess": false, "showRecommendedArticles": false, "showBuyMe": true, "hasPdf": true, "pubDate": "1995-10-01T00:00:00", "pubType": "proceedings", "pages": "19", "year": "1995", "issn": "1070-2385", "isbn": "0-8186-7187-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "71870011", "articleId": "12OmNzZmZv2", "__typename": "AdjacentArticleType" }, "next": { "fno": "71870027", "articleId": "12OmNxHJ9p1", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzSh1bf", "title": "Proceedings VIS 2001. Visualization 2001", "acronym": "visual", "groupId": "1000796", "volume": "0", "displayVolume": "0", "year": "2001", "__typename": "ProceedingType" }, "article": { "id": "12OmNwNwzCe", "doi": "10.1109/VISUAL.2001.964561", "title": "Virtual temporal bone dissection: a case study", "normalizedTitle": "Virtual temporal bone dissection: a case study", "abstract": "The Temporal Bone Dissection Simulator is an ongoing research project for the construction of a synthetic environment suitable for virtual dissection of human temporal bone and related anatomy. Funded by the National Institute on Deafness and Other Communication Disorders (NIDCD), the primary goal of this project is to provide a safe, robust, and cost-effective virtual environment for learning the anatomy and surgical procedures associated with the temporal bone. Direct volume visualization has been indispensable for the necessary level of realism and interactivity that is vital to the success of this project. This work is being conducted by the Ohio Supercomputer Center in conjunction with the Department of Otolaryngology at the Ohio State University, and NIDCD.", "abstracts": [ { "abstractType": "Regular", "content": "The Temporal Bone Dissection Simulator is an ongoing research project for the construction of a synthetic environment suitable for virtual dissection of human temporal bone and related anatomy. Funded by the National Institute on Deafness and Other Communication Disorders (NIDCD), the primary goal of this project is to provide a safe, robust, and cost-effective virtual environment for learning the anatomy and surgical procedures associated with the temporal bone. Direct volume visualization has been indispensable for the necessary level of realism and interactivity that is vital to the success of this project. 
This work is being conducted by the Ohio Supercomputer Center in conjunction with the Department of Otolaryngology at the Ohio State University, and NIDCD.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The Temporal Bone Dissection Simulator is an ongoing research project for the construction of a synthetic environment suitable for virtual dissection of human temporal bone and related anatomy. Funded by the National Institute on Deafness and Other Communication Disorders (NIDCD), the primary goal of this project is to provide a safe, robust, and cost-effective virtual environment for learning the anatomy and surgical procedures associated with the temporal bone. Direct volume visualization has been indispensable for the necessary level of realism and interactivity that is vital to the success of this project. This work is being conducted by the Ohio Supercomputer Center in conjunction with the Department of Otolaryngology at the Ohio State University, and NIDCD.", "fno": "00964561", "keywords": [ "Computer Aided Instruction", "Virtual Reality", "Surgery", "Medical Computing", "Digital Simulation", "Computer Based Training", "Virtual Dissection", "Human Temporal Bone", "Anatomy", "Surgical Procedures", "Direct Volume Visualization", "Virtual Environment", "Learning", "Learning Anatomy", "Learning Surgical Technique", "Bones", "Computer Aided Software Engineering", "Surgery", "Anatomy", "Computational Modeling", "Computer Simulation", "Computer Graphics", "Supercomputers", "Humans", "Visualization" ], "authors": [ { "affiliation": "Ohio Supercomput. Center, OH, USA", "fullName": "J. Bryan", "givenName": "J.", "surname": "Bryan", "__typename": "ArticleAuthorType" }, { "affiliation": "Ohio Supercomput. Center, OH, USA", "fullName": "D. Stredney", "givenName": "D.", "surname": "Stredney", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "G. 
Wiet", "givenName": "G.", "surname": "Wiet", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "D. Sessanna", "givenName": "D.", "surname": "Sessanna", "__typename": "ArticleAuthorType" } ], "idPrefix": "visual", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2001-01-01T00:00:00", "pubType": "proceedings", "pages": "497-598", "year": "2001", "issn": null, "isbn": "0-7803-7201-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "00964560", "articleId": "12OmNyQGS5W", "__typename": "AdjacentArticleType" }, "next": { "fno": "00964563", "articleId": "12OmNxbmSAI", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccrd/2010/4043/0/4043a182", "title": "Virtual Animal Slaughtering and Dissection via Global Navigation Elements", "doi": null, "abstractUrl": "/proceedings-article/iccrd/2010/4043a182/12OmNAndiyr", "parentPublication": { "id": "proceedings/iccrd/2010/4043/0", "title": "Computer Research and Development, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbms/2015/6775/0/6775a310", "title": "Region-Specific Automated Feedback in Temporal Bone Surgery Simulation", "doi": null, "abstractUrl": "/proceedings-article/cbms/2015/6775a310/12OmNCmGNS7", "parentPublication": { "id": "proceedings/cbms/2015/6775/0", "title": "2015 IEEE 28th International Symposium on Computer-Based Medical Systems (CBMS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/5209b916", "title": "Transfer Learning of a Temporal Bone Performance Model via Anatomical Feature Registration", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209b916/12OmNqAU6w2", "parentPublication": { "id": 
"proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2002/1492/0/14920209", "title": "Real-Time Haptic and Visual Simulation of Bone Dissection", "doi": null, "abstractUrl": "/proceedings-article/vr/2002/14920209/12OmNqEAT3R", "parentPublication": { "id": "proceedings/vr/2002/1492/0", "title": "Proceedings IEEE Virtual Reality 2002", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2003/1882/0/18820102", "title": "Adaptive techniques for real-time haptic and visual simulation of bone dissection", "doi": null, "abstractUrl": "/proceedings-article/vr/2003/18820102/12OmNrYCXNP", "parentPublication": { "id": "proceedings/vr/2003/1882/0", "title": "Proceedings IEEE Virtual Reality 2003", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2001/7200/0/7200bryan", "title": "Virtual Temporal Bone Dissection: A Case Study", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2001/7200bryan/12OmNwxlrd4", "parentPublication": { "id": "proceedings/ieee-vis/2001/7200/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbms/2017/1710/0/1710a007", "title": "Design and Evaluation of a Virtual Reality Simulation Module for Training Advanced Temporal Bone Surgery", "doi": null, "abstractUrl": "/proceedings-article/cbms/2017/1710a007/12OmNyen1q8", "parentPublication": { "id": "proceedings/cbms/2017/1710/0", "title": "2017 IEEE 30th International Symposium on Computer-Based Medical Systems (CBMS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2014/2871/0/06802077", "title": "An AR edutainment system supporting bone anatomy learning", "doi": null, "abstractUrl": 
"/proceedings-article/vr/2014/06802077/12OmNylKAKS", "parentPublication": { "id": "proceedings/vr/2014/2871/0", "title": "2014 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbms/2019/2286/0/228600a495", "title": "The Effect of Practice Distribution on Skill Retention in Virtual Reality Temporal Bone Surgery Training", "doi": null, "abstractUrl": "/proceedings-article/cbms/2019/228600a495/1cdO0grsecg", "parentPublication": { "id": "proceedings/cbms/2019/2286/0", "title": "2019 IEEE 32nd International Symposium on Computer-Based Medical Systems (CBMS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNCfAPCa", "title": "Visualization Conference, IEEE", "acronym": "ieee-vis", "groupId": "1000796", "volume": "0", "displayVolume": "0", "year": "2001", "__typename": "ProceedingType" }, "article": { "id": "12OmNwxlrd4", "doi": "10.1109/VISUAL.2001.964561", "title": "Virtual Temporal Bone Dissection: A Case Study", "normalizedTitle": "Virtual Temporal Bone Dissection: A Case Study", "abstract": "The Temporal Bone Dissection Simulator is an ongoing research project for the construction of a synthetic environment suitable for virtual dissection of human temporal bone and related anatomy. Funded by the National Institute on Deafness and Other Communication Disorders (NIDCD), the primary goal of this project is to provide a safe, robust, and cost-effective virtual environment for learning the anatomy and surgical procedures associated with the temporal bone. Direct volume visualization has been indispensable for the necessary level of realism and interactivity that is vital to the success of this project. This work is being conducted by the Ohio Supercomputer Center in conjunction with the Department of Otolaryngology at the Ohio State University, and NIDCD.", "abstracts": [ { "abstractType": "Regular", "content": "The Temporal Bone Dissection Simulator is an ongoing research project for the construction of a synthetic environment suitable for virtual dissection of human temporal bone and related anatomy. Funded by the National Institute on Deafness and Other Communication Disorders (NIDCD), the primary goal of this project is to provide a safe, robust, and cost-effective virtual environment for learning the anatomy and surgical procedures associated with the temporal bone. Direct volume visualization has been indispensable for the necessary level of realism and interactivity that is vital to the success of this project. 
This work is being conducted by the Ohio Supercomputer Center in conjunction with the Department of Otolaryngology at the Ohio State University, and NIDCD.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The Temporal Bone Dissection Simulator is an ongoing research project for the construction of a synthetic environment suitable for virtual dissection of human temporal bone and related anatomy. Funded by the National Institute on Deafness and Other Communication Disorders (NIDCD), the primary goal of this project is to provide a safe, robust, and cost-effective virtual environment for learning the anatomy and surgical procedures associated with the temporal bone. Direct volume visualization has been indispensable for the necessary level of realism and interactivity that is vital to the success of this project. This work is being conducted by the Ohio Supercomputer Center in conjunction with the Department of Otolaryngology at the Ohio State University, and NIDCD.", "fno": "7200bryan", "keywords": [ "Three Dimensional Graphics And Realism", "Virtual Reality", "Applications", "Types Of Simulation", "Visual", "Surgery", "Health", "Temporal Bone Dissection" ], "authors": [ { "affiliation": "The Ohio State University", "fullName": "Jason Bryan", "givenName": "Jason", "surname": "Bryan", "__typename": "ArticleAuthorType" }, { "affiliation": "The Ohio State University", "fullName": "Don Stredney", "givenName": "Don", "surname": "Stredney", "__typename": "ArticleAuthorType" }, { "affiliation": "The Ohio State University", "fullName": "Greg Wiet", "givenName": "Greg", "surname": "Wiet", "__typename": "ArticleAuthorType" }, { "affiliation": "The Ohio State University", "fullName": "Dennis Sessanna", "givenName": "Dennis", "surname": "Sessanna", "__typename": "ArticleAuthorType" } ], "idPrefix": "ieee-vis", "isOpenAccess": false, "showRecommendedArticles": false, "showBuyMe": true, "hasPdf": true, "pubDate": "2001-10-01T00:00:00", "pubType": "proceedings", 
"pages": "", "year": "2001", "issn": "1070-2385", "isbn": "0-7803-7200-X", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "7200bartoli", "articleId": "12OmNzaQod8", "__typename": "AdjacentArticleType" }, "next": { "fno": "7200museth", "articleId": "12OmNwwd2S3", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "1JC1F8KcINO", "title": "2022 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "acronym": "bibm", "groupId": "9994793", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1JC37R9l9hS", "doi": "10.1109/BIBM55620.2022.9995558", "title": "A Global-Local Cascade Network for Multi-bone Segmentation in Chest CT", "normalizedTitle": "A Global-Local Cascade Network for Multi-bone Segmentation in Chest CT", "abstract": "Chest multi-bone segmentation plays an important role in clinical applications such as metastasis analysis and surgical planning. The global context between bones is critical for accurately identifying bone sub-classes; meanwhile, the influence of local details on bone segmentation accuracy cannot be ignored. However, the global context and local details are challenging to trade off under hardware and computation cost limitations. In this study, we propose Bone-Net that simultaneously leverages global context and local details by employing a two-stage cascaded framework for multi-bone segmentation in chest CT. In the first stage, we feed low-resolution large-scale images to bidirectional feature pyramid network to extract global context and perform multi-bone category prediction. In the second stage, we propose a global-guided gating module to link global context and local details to refine bone segmentation. During inference, we obtained the multi-bone segmentation results by rendering the category information into the segmentation results. Furthermore, we design a novel loss function to alleviate the category confusion problem for adjacent bones. The proposed method is trained and evaluated on a challenging dataset of 250 chest CT scans from various centers and scanners. 
Experimental results show that the proposed Bone-Net achieves high performance (84.1&#x0025; of dice similarity coefficient, 87.7&#x0025; of recall, and 80.7&#x0025; of precision) and outperforms five benchmarks for multi-bone segmentation.", "abstracts": [ { "abstractType": "Regular", "content": "Chest multi-bone segmentation plays an important role in clinical applications such as metastasis analysis and surgical planning. The global context between bones is critical for accurately identifying bone sub-classes; meanwhile, the influence of local details on bone segmentation accuracy cannot be ignored. However, the global context and local details are challenging to trade off under hardware and computation cost limitations. In this study, we propose Bone-Net that simultaneously leverages global context and local details by employing a two-stage cascaded framework for multi-bone segmentation in chest CT. In the first stage, we feed low-resolution large-scale images to bidirectional feature pyramid network to extract global context and perform multi-bone category prediction. In the second stage, we propose a global-guided gating module to link global context and local details to refine bone segmentation. During inference, we obtained the multi-bone segmentation results by rendering the category information into the segmentation results. Furthermore, we design a novel loss function to alleviate the category confusion problem for adjacent bones. The proposed method is trained and evaluated on a challenging dataset of 250 chest CT scans from various centers and scanners. 
Experimental results show that the proposed Bone-Net achieves high performance (84.1&#x0025; of dice similarity coefficient, 87.7&#x0025; of recall, and 80.7&#x0025; of precision) and outperforms five benchmarks for multi-bone segmentation.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Chest multi-bone segmentation plays an important role in clinical applications such as metastasis analysis and surgical planning. The global context between bones is critical for accurately identifying bone sub-classes; meanwhile, the influence of local details on bone segmentation accuracy cannot be ignored. However, the global context and local details are challenging to trade off under hardware and computation cost limitations. In this study, we propose Bone-Net that simultaneously leverages global context and local details by employing a two-stage cascaded framework for multi-bone segmentation in chest CT. In the first stage, we feed low-resolution large-scale images to bidirectional feature pyramid network to extract global context and perform multi-bone category prediction. In the second stage, we propose a global-guided gating module to link global context and local details to refine bone segmentation. During inference, we obtained the multi-bone segmentation results by rendering the category information into the segmentation results. Furthermore, we design a novel loss function to alleviate the category confusion problem for adjacent bones. The proposed method is trained and evaluated on a challenging dataset of 250 chest CT scans from various centers and scanners. 
Experimental results show that the proposed Bone-Net achieves high performance (84.1% of dice similarity coefficient, 87.7% of recall, and 80.7% of precision) and outperforms five benchmarks for multi-bone segmentation.", "fno": "09995558", "keywords": [ "Bone", "Computerised Tomography", "Feature Extraction", "Image Classification", "Image Registration", "Image Segmentation", "Medical Image Processing", "Bidirectional Feature Pyramid Network", "Bone Segmentation", "Bone Net", "Category Confusion Problem", "Chest CT", "Chest Multibone Segmentation", "Global Guided Gating Module", "Global Local Cascade Network", "Multibone Category Prediction", "Image Segmentation", "Computed Tomography", "Surgery", "Benchmark Testing", "Bones", "Feature Extraction", "Rendering Computer Graphics", "Chest CT", "Deep Learning", "Medical Image Segmentation", "Multi Bone Segmentation", "Convolution Neural Network" ], "authors": [ { "affiliation": "Infervision Medical Technology Co., Ltd.,Beijing,China", "fullName": "Yanfeng Sun", "givenName": "Yanfeng", "surname": "Sun", "__typename": "ArticleAuthorType" }, { "affiliation": "Infervision Medical Technology Co., Ltd.,Beijing,China", "fullName": "Han Kang", "givenName": "Han", "surname": "Kang", "__typename": "ArticleAuthorType" }, { "affiliation": "Infervision Medical Technology Co., Ltd.,Beijing,China", "fullName": "Huan Zhang", "givenName": "Huan", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "Infervision Medical Technology Co., Ltd.,Beijing,China", "fullName": "Yu Wang", "givenName": "Yu", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "Infervision Medical Technology Co., Ltd.,Beijing,China", "fullName": "Hong Shen", "givenName": "Hong", "surname": "Shen", "__typename": "ArticleAuthorType" }, { "affiliation": "Infervision Medical Technology Co., Ltd.,Beijing,China", "fullName": "Pengxin Yu", "givenName": "Pengxin", "surname": "Yu", "__typename": "ArticleAuthorType" } ], "idPrefix": 
"bibm", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-12-01T00:00:00", "pubType": "proceedings", "pages": "809-812", "year": "2022", "issn": null, "isbn": "978-1-6654-6819-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09995323", "articleId": "1JC38urmFzy", "__typename": "AdjacentArticleType" }, "next": { "fno": "09995191", "articleId": "1JC26lcdgqY", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/hpcs/2017/3250/0/08035158", "title": "Algorithmic Quantification of Skull Bone Density", "doi": null, "abstractUrl": "/proceedings-article/hpcs/2017/08035158/12OmNB1eJzj", "parentPublication": { "id": "proceedings/hpcs/2017/3250/0", "title": "2017 International Conference on High-Performance Computing & Simulation (HPCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itme/2016/3906/0/3906a231", "title": "Maxillary Sinus Cyst Drug Combination in the Treatment of Maxillary Bone Cyst", "doi": null, "abstractUrl": "/proceedings-article/itme/2016/3906a231/12OmNBSBkhF", "parentPublication": { "id": "proceedings/itme/2016/3906/0", "title": "2016 8th International Conference on Information Technology in Medicine and Education (ITME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2016/0641/0/07477606", "title": "Accurate 3D bone segmentation in challenging CT images: Bottom-up parsing and contextualized optimization", "doi": null, "abstractUrl": "/proceedings-article/wacv/2016/07477606/12OmNwMFMli", "parentPublication": { "id": "proceedings/wacv/2016/0641/0", "title": "2016 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/isdea/2013/4893/0/06456534", "title": "CUDA-Accelerated Volume Rendering of Two-phase Bone", "doi": null, "abstractUrl": "/proceedings-article/isdea/2013/06456534/12OmNzICEO1", "parentPublication": { "id": "proceedings/isdea/2013/4893/0", "title": "2013 Third International Conference on Intelligent System Design and Engineering Applications (ISDEA 2013)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ssiai/2016/9919/0/07459162", "title": "Towards automatic 3D bone marrow segmentation", "doi": null, "abstractUrl": "/proceedings-article/ssiai/2016/07459162/12OmNzllxYS", "parentPublication": { "id": "proceedings/ssiai/2016/9919/0", "title": "2016 IEEE Southwest Symposium on Image Analysis and Interpretation (SSIAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2014/06/mcg2014060016", "title": "Using Global Illumination in Volume Visualization of Rheumatoid Arthritis CT Data", "doi": null, "abstractUrl": "/magazine/cg/2014/06/mcg2014060016/13rRUwvT9lI", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2021/0126/0/09669726", "title": "Boosting Segmentation Performance across Datasets using Histogram Specification with Application to Pelvic Bone Segmentation", "doi": null, "abstractUrl": "/proceedings-article/bibm/2021/09669726/1A9Wu5sSgIE", "parentPublication": { "id": "proceedings/bibm/2021/0126/0", "title": "2021 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iiai-aai/2022/9755/0/975500a409", "title": "Isometric Convolutional Neural Networks for Bone Suppression of Multi-Planar Dual Energy Chest Radiograph", "doi": null, "abstractUrl": 
"/proceedings-article/iiai-aai/2022/975500a409/1GU6PQpGmha", "parentPublication": { "id": "proceedings/iiai-aai/2022/9755/0", "title": "2022 12th International Congress on Advanced Applied Informatics (IIAI-AAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/bd/2021/01/09345435", "title": "COVID-19 Chest CT Image Segmentation Network by Multi-Scale Fusion and Enhancement Operations", "doi": null, "abstractUrl": "/journal/bd/2021/01/09345435/1qTYEs9wmYg", "parentPublication": { "id": "trans/bd", "title": "IEEE Transactions on Big Data", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicas/2020/9085/0/908500a214", "title": "Automatic Segmentation of Metastasis Lesions in SPECT Bone Scan Images", "doi": null, "abstractUrl": "/proceedings-article/icicas/2020/908500a214/1sZ2WG2VtF6", "parentPublication": { "id": "proceedings/icicas/2020/9085/0", "title": "2020 International Conference on Intelligent Computing, Automation and Systems (ICICAS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNz5JC1b", "title": "Virtual Reality Annual International Symposium", "acronym": "vrais", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "1996", "__typename": "ProceedingType" }, "article": { "id": "1fw1LZWjeh2", "doi": "10.1109/VRAIS.1996.490513", "title": "Multisensory platform for surgical simulation", "normalizedTitle": "Multisensory platform for surgical simulation", "abstract": "Advanced display technologies have made the virtual exploration of relatively complex models feasible in many applications. Unfortunately, only a few human interfaces allow natural interaction with the environment. Moreover in surgical applications, such realistic interaction requires real time rendering of volumetric data-placing an overwhelming performance burden on the system. We report on a collaboration of a unique interdisciplinary group developing a virtual reality system that provides intuitive interaction with complex volume data by employing real time realistic volume rendering and convincing force feedback (haptic) sensations. We describe our rendering methods and the haptic devices in detail and demonstrate the utilization of this system in the real world application of Endoscopic Sinus Surgery (ESS) simulation.", "abstracts": [ { "abstractType": "Regular", "content": "Advanced display technologies have made the virtual exploration of relatively complex models feasible in many applications. Unfortunately, only a few human interfaces allow natural interaction with the environment. Moreover in surgical applications, such realistic interaction requires real time rendering of volumetric data-placing an overwhelming performance burden on the system. We report on a collaboration of a unique interdisciplinary group developing a virtual reality system that provides intuitive interaction with complex volume data by employing real time realistic volume rendering and convincing force feedback (haptic) sensations. 
We describe our rendering methods and the haptic devices in detail and demonstrate the utilization of this system in the real world application of Endoscopic Sinus Surgery (ESS) simulation.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Advanced display technologies have made the virtual exploration of relatively complex models feasible in many applications. Unfortunately, only a few human interfaces allow natural interaction with the environment. Moreover in surgical applications, such realistic interaction requires real time rendering of volumetric data-placing an overwhelming performance burden on the system. We report on a collaboration of a unique interdisciplinary group developing a virtual reality system that provides intuitive interaction with complex volume data by employing real time realistic volume rendering and convincing force feedback (haptic) sensations. We describe our rendering methods and the haptic devices in detail and demonstrate the utilization of this system in the real world application of Endoscopic Sinus Surgery (ESS) simulation.", "fno": "00490513", "keywords": [ "Medical Computing", "Surgery", "Virtual Reality", "Rendering Computer Graphics", "Interactive Systems", "Real Time Systems", "Digital Simulation", "Multisensory Platform", "Surgical Simulation", "Advanced Display Technologies", "Virtual Exploration", "Human Interfaces", "Natural Interaction", "Surgical Applications", "Realistic Interaction", "Real Time Rendering", "Volumetric Data", "Performance Burden", "Interdisciplinary Group", "Virtual Reality System", "Intuitive Interaction", "Complex Volume Data", "Real Time Realistic Volume Rendering", "Force Feedback Sensations", "Rendering Methods", "Haptic Devices", "Real World Application", "Endoscopic Sinus Surgery Simulation", "Surgery", "Real Time Systems", "Haptic Interfaces", "Displays", "Humans", "Rendering Computer Graphics", "Collaboration", "Virtual Reality", "Force Feedback", "Electronic Switching 
Systems" ], "authors": [ { "affiliation": "Dept. of Comput. & Inf. Sci., Ohio State Univ., Columbus, OH, USA", "fullName": "R. Yagel", "givenName": "R.", "surname": "Yagel", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "D. Stredney", "givenName": "D.", "surname": "Stredney", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "G.J. Wiet", "givenName": "G.J.", "surname": "Wiet", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "P. Schmalbrock", "givenName": "P.", "surname": "Schmalbrock", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "L. Rosenberg", "givenName": "L.", "surname": "Rosenberg", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "D.J. Sessanna", "givenName": "D.J.", "surname": "Sessanna", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Y. Kurzion", "givenName": "Y.", "surname": "Kurzion", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "S. King", "givenName": "S.", "surname": "King", "__typename": "ArticleAuthorType" } ], "idPrefix": "vrais", "isOpenAccess": false, "showRecommendedArticles": false, "showBuyMe": true, "hasPdf": true, "pubDate": "1996-01-01T00:00:00", "pubType": "proceedings", "pages": "72-78", "year": "1996", "issn": null, "isbn": "0-8186-7296-X", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "72950066", "articleId": "12OmNCesr2I", "__typename": "AdjacentArticleType" }, "next": { "fno": "72950079", "articleId": "12OmNzQR1oS", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzsrwbn", "title": "2017 International Conference on Information Systems and Computer Science (INCISCOS)", "acronym": "inciscos", "groupId": "1825124", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNAkWvh8", "doi": "10.1109/INCISCOS.2017.32", "title": "Radial Tree in Bunches: Optimizing the Use of Space in the Visualization of Radial Trees", "normalizedTitle": "Radial Tree in Bunches: Optimizing the Use of Space in the Visualization of Radial Trees", "abstract": "The graphical representation of information hierarchies makes it easy to recognize the relationships between the different elements of such structures. The radial tree is a technique for drawing hierarchies that is visually attractive, intuitive and uses space in an efficient way. Although the technique was developed in the late 1970s, it has recently acquired more interest due to its use in several modern information visualization systems. In this paper, we present a brief review of the basic algorithms to drawing radial trees and propose a new algorithm that allow to make a more efficient use of the available space.", "abstracts": [ { "abstractType": "Regular", "content": "The graphical representation of information hierarchies makes it easy to recognize the relationships between the different elements of such structures. The radial tree is a technique for drawing hierarchies that is visually attractive, intuitive and uses space in an efficient way. Although the technique was developed in the late 1970s, it has recently acquired more interest due to its use in several modern information visualization systems. 
In this paper, we present a brief review of the basic algorithms to drawing radial trees and propose a new algorithm that allow to make a more efficient use of the available space.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The graphical representation of information hierarchies makes it easy to recognize the relationships between the different elements of such structures. The radial tree is a technique for drawing hierarchies that is visually attractive, intuitive and uses space in an efficient way. Although the technique was developed in the late 1970s, it has recently acquired more interest due to its use in several modern information visualization systems. In this paper, we present a brief review of the basic algorithms to drawing radial trees and propose a new algorithm that allow to make a more efficient use of the available space.", "fno": "2644a369", "keywords": [ "Data Visualisation", "Trees Mathematics", "Radial Tree", "Information Hierarchies", "Modern Information Visualization Systems", "Graphical Representation", "Visualization", "Silicon Compounds", "Information Systems", "Computer Science", "Silicon", "Tree Drawing", "Radial Tree", "Information Visualization" ], "authors": [ { "affiliation": null, "fullName": "Armando Arce-Orozco", "givenName": "Armando", "surname": "Arce-Orozco", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Luis Camacho-Valerio", "givenName": "Luis", "surname": "Camacho-Valerio", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Steven Madrigal-Quesada", "givenName": "Steven", "surname": "Madrigal-Quesada", "__typename": "ArticleAuthorType" } ], "idPrefix": "inciscos", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-11-01T00:00:00", "pubType": "proceedings", "pages": "369-374", "year": "2017", "issn": null, "isbn": "978-1-5386-2644-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, 
"webExtras": [], "adjacentArticles": { "previous": { "fno": "2644a361", "articleId": "12OmNwBT1iF", "__typename": "AdjacentArticleType" }, "next": { "fno": "2644a375", "articleId": "12OmNxWcHkH", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ieee-infovis/2000/0804/0/08040057", "title": "Focus+Context Display and Navigation Techniques for Enhancing Radial, Space-Filling Hierarchy Visualizations", "doi": null, "abstractUrl": "/proceedings-article/ieee-infovis/2000/08040057/12OmNC3Xhvi", "parentPublication": { "id": "proceedings/ieee-infovis/2000/0804/0", "title": "Information Visualization, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2003/1988/0/19880479", "title": "Labeled Radial Drawing of Data Structures", "doi": null, "abstractUrl": "/proceedings-article/iv/2003/19880479/12OmNwDACwp", "parentPublication": { "id": "proceedings/iv/2003/1988/0", "title": "Proceedings on Seventh International Conference on Information Visualization, 2003. 
IV 2003.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdew/2005/2657/0/22851168", "title": "Efficient Evaluation of Radial Queries using the Target Tree", "doi": null, "abstractUrl": "/proceedings-article/icdew/2005/22851168/12OmNx9WSTI", "parentPublication": { "id": "proceedings/icdew/2005/2657/0", "title": "21st International Conference on Data Engineering Workshops (ICDEW'05)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2012/4771/0/4771a229", "title": "Voronoi Diagram Based Dimensional Anchor Assessment for Radial Visualizations", "doi": null, "abstractUrl": "/proceedings-article/iv/2012/4771a229/12OmNxVDuUI", "parentPublication": { "id": "proceedings/iv/2012/4771/0", "title": "2012 16th International Conference on Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-infovis/2002/1751/0/17510085", "title": "A Space-Optimized Tree Visualization", "doi": null, "abstractUrl": "/proceedings-article/ieee-infovis/2002/17510085/12OmNzy7uV8", "parentPublication": { "id": "proceedings/ieee-infovis/2002/1751/0", "title": "Information Visualization, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/12/ttg2011122440", "title": "Evaluation of Traditional, Orthogonal, and Radial Tree Diagrams by an Eye Tracking Study", "doi": null, "abstractUrl": "/journal/tg/2011/12/ttg2011122440/13rRUwI5U7W", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/04/ttg2011040393", "title": "The Design Space of Implicit Hierarchy Visualization: A Survey", "doi": null, "abstractUrl": "/journal/tg/2011/04/ttg2011040393/13rRUwbs20T", "parentPublication": { "id": "trans/tg", 
"title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2009/05/ttg2009050759", "title": "A Survey of Radial Methods for Information Visualization", "doi": null, "abstractUrl": "/journal/tg/2009/05/ttg2009050759/13rRUx0xPZv", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/12/06875958", "title": "Cupid: Cluster-Based Exploration of Geometry Generators with Parallel Coordinates and Radial Trees", "doi": null, "abstractUrl": "/journal/tg/2014/12/06875958/13rRUxZ0o1C", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2007/03/v0583", "title": "A Radial Adaptation of the Sugiyama Framework for Visualizing Hierarchical Information", "doi": null, "abstractUrl": "/journal/tg/2007/03/v0583/13rRUyY28Ym", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNA0MYYl", "title": "Computer and Information Technology, IEEE 8th International Conference on", "acronym": "citworkshops", "groupId": "1001948", "volume": "0", "displayVolume": "0", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNAs2tql", "doi": "10.1109/CIT.2008.Workshops.100", "title": "Anisotropic Diffusion Transform Based on Directions of Edges", "normalizedTitle": "Anisotropic Diffusion Transform Based on Directions of Edges", "abstract": "This paper provides the derivation of speckle reducing anisotropic diffusion (SRAD), anisotropic diffusion method tailored to ultrasonic imaging applications. The conventional anisotropic diffusion is performed in four directions??without condition in the previous method. In this paper, a new anisotropic diffusion transform based on directions of edges is proposed. The proposed diffusion is not performed at a pixel which is not an edge. The range of proposed diffusion is selected adaptively according to the number of the directions of edges. Experiments results show that the process time of the proposed method over conventional methods, can be faster slightly. Also, the proposed method can improve the image quality of diagonal edges in ultrasound image with speckle noise.", "abstracts": [ { "abstractType": "Regular", "content": "This paper provides the derivation of speckle reducing anisotropic diffusion (SRAD), anisotropic diffusion method tailored to ultrasonic imaging applications. The conventional anisotropic diffusion is performed in four directions??without condition in the previous method. In this paper, a new anisotropic diffusion transform based on directions of edges is proposed. The proposed diffusion is not performed at a pixel which is not an edge. The range of proposed diffusion is selected adaptively according to the number of the directions of edges. 
Experiments results show that the process time of the proposed method over conventional methods, can be faster slightly. Also, the proposed method can improve the image quality of diagonal edges in ultrasound image with speckle noise.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper provides the derivation of speckle reducing anisotropic diffusion (SRAD), anisotropic diffusion method tailored to ultrasonic imaging applications. The conventional anisotropic diffusion is performed in four directions??without condition in the previous method. In this paper, a new anisotropic diffusion transform based on directions of edges is proposed. The proposed diffusion is not performed at a pixel which is not an edge. The range of proposed diffusion is selected adaptively according to the number of the directions of edges. Experiments results show that the process time of the proposed method over conventional methods, can be faster slightly. Also, the proposed method can improve the image quality of diagonal edges in ultrasound image with speckle noise.", "fno": "3242a396", "keywords": [ "Anisotropic Diffusion", "Speckle", "Gradinet" ], "authors": [ { "affiliation": null, "fullName": "Hye Suk Kim", "givenName": "Hye Suk", "surname": "Kim", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Hyo Sun Yoon", "givenName": "Hyo Sun", "surname": "Yoon", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Nguyen Dinh Toan", "givenName": "Nguyen Dinh", "surname": "Toan", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Guee Sang Lee", "givenName": "Guee Sang", "surname": "Lee", "__typename": "ArticleAuthorType" } ], "idPrefix": "citworkshops", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-07-01T00:00:00", "pubType": "proceedings", "pages": "396-400", "year": "2008", "issn": null, "isbn": "978-0-7695-3242-4", "notes": null, 
"notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3242a483", "articleId": "12OmNySXF2T", "__typename": "AdjacentArticleType" }, "next": { "fno": "3242a401", "articleId": "12OmNvnOwtk", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/alpit/2008/3273/0/3273a198", "title": "Speckle Reducing Anisotropic Diffusion Based on Directions of Gradient", "doi": null, "abstractUrl": "/proceedings-article/alpit/2008/3273a198/12OmNA0MZ73", "parentPublication": { "id": "proceedings/alpit/2008/3273/0", "title": "Advanced Language Processing and Web Information Technology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbms/2008/3165/0/3165a029", "title": "Noise Filtering Using Edge-Driven Adaptive Anisotropic Diffusion", "doi": null, "abstractUrl": "/proceedings-article/cbms/2008/3165a029/12OmNB1wkMx", "parentPublication": { "id": "proceedings/cbms/2008/3165/0", "title": "2008 21st IEEE International Symposium on Computer-Based Medical Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/snpd/2007/2909/1/290910191", "title": "An Improved Anisotropic Diffusion PDE for Noise Removal and Edge Preservation", "doi": null, "abstractUrl": "/proceedings-article/snpd/2007/290910191/12OmNB8TUc8", "parentPublication": { "id": "proceedings/snpd/2007/2909/1", "title": "Eighth ACIS International Conference on Software Engineering, Artificial Intelligence, Networking, and Parallel/Distributed Computing (SNPD 2007)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isspit/2008/3554/0/04775703", "title": "Anisotropic Diffusion for Preservation of Line-edges", "doi": null, "abstractUrl": "/proceedings-article/isspit/2008/04775703/12OmNBA9oBV", "parentPublication": { "id": 
"proceedings/isspit/2008/3554/0", "title": "2008 8th IEEE International Symposium on Signal Processing and Information Technology. ISSPIT 2008", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvgip/2008/3476/0/3476a033", "title": "Edge Detectors Based Anisotropic Diffusion for Enhancement of Digital Images", "doi": null, "abstractUrl": "/proceedings-article/icvgip/2008/3476a033/12OmNBTs7I5", "parentPublication": { "id": "proceedings/icvgip/2008/3476/0", "title": "Computer Vision, Graphics &amp; Image Processing, Indian Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2012/4683/0/4683a383", "title": "Regularized Gradient Kernel Anisotropic Diffusion for Better Image Filtering", "doi": null, "abstractUrl": "/proceedings-article/crv/2012/4683a383/12OmNwCsdAi", "parentPublication": { "id": "proceedings/crv/2012/4683/0", "title": "2012 Ninth Conference on Computer and Robot Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ccie/2010/4026/2/4026b111", "title": "Digital Image Magnification with Improved Anisotropic Diffusion", "doi": null, "abstractUrl": "/proceedings-article/ccie/2010/4026b111/12OmNyoSbiK", "parentPublication": { "id": "proceedings/ccie/2010/4026/2", "title": "Computing, Control and Industrial Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visapp/2014/8133/1/07294790", "title": "Oriented half Gaussian kernels and anisotropic diffusion", "doi": null, "abstractUrl": "/proceedings-article/visapp/2014/07294790/12OmNyrIatl", "parentPublication": { "id": "proceedings/visapp/2014/8133/1", "title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/icvgip/2008/3476/0/3476a599", "title": "Anisotropic Diffusion and Segmentation of Colored Flowers", "doi": null, "abstractUrl": "/proceedings-article/icvgip/2008/3476a599/12OmNzBOi2h", "parentPublication": { "id": "proceedings/icvgip/2008/3476/0", "title": "Computer Vision, Graphics &amp; Image Processing, Indian Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icgec/2010/4281/0/4281a703", "title": "Low-contrast Edge Enhancing Anisotropic Diffusion for Speckle Reduction", "doi": null, "abstractUrl": "/proceedings-article/icgec/2010/4281a703/12OmNzhELor", "parentPublication": { "id": "proceedings/icgec/2010/4281/0", "title": "Genetic and Evolutionary Computing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBgQFLV", "title": "2008 8th IEEE International Symposium on Signal Processing and Information Technology. ISSPIT 2008", "acronym": "isspit", "groupId": "1001026", "volume": "0", "displayVolume": "0", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNBA9oBV", "doi": "10.1109/ISSPIT.2008.4775703", "title": "Anisotropic Diffusion for Preservation of Line-edges", "normalizedTitle": "Anisotropic Diffusion for Preservation of Line-edges", "abstract": "In existing approaches, diffusion is performed in four directions (North, South, East, West) without specific conditions. Therefore, these methods have shortcomings of distorted with the existence of impulse noises. In this paper, a new anisotropic diffusion based on directions of line-edges is proposed to enhance preservation of line-edges together with removal of noises. In the proposed method, an edge detection mask is used to find the direction of a line-edge. As a result, when the magnitude of edge detection is large enough, there exists a line-edge. In the case of a line-edge, the weight of diffusion is selected adaptively according to the direction of the line-edge. The diffusion is based on 8-directions diffusion with emphasis on the line-edge direction. Experimental results show that the proposed method can eliminate noise while preserving contour of line-edges.", "abstracts": [ { "abstractType": "Regular", "content": "In existing approaches, diffusion is performed in four directions (North, South, East, West) without specific conditions. Therefore, these methods have shortcomings of distorted with the existence of impulse noises. In this paper, a new anisotropic diffusion based on directions of line-edges is proposed to enhance preservation of line-edges together with removal of noises. In the proposed method, an edge detection mask is used to find the direction of a line-edge. 
As a result, when the magnitude of edge detection is large enough, there exists a line-edge. In the case of a line-edge, the weight of diffusion is selected adaptively according to the direction of the line-edge. The diffusion is based on 8-directions diffusion with emphasis on the line-edge direction. Experimental results show that the proposed method can eliminate noise while preserving contour of line-edges.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In existing approaches, diffusion is performed in four directions (North, South, East, West) without specific conditions. Therefore, these methods have shortcomings of distorted with the existence of impulse noises. In this paper, a new anisotropic diffusion based on directions of line-edges is proposed to enhance preservation of line-edges together with removal of noises. In the proposed method, an edge detection mask is used to find the direction of a line-edge. As a result, when the magnitude of edge detection is large enough, there exists a line-edge. In the case of a line-edge, the weight of diffusion is selected adaptively according to the direction of the line-edge. The diffusion is based on 8-directions diffusion with emphasis on the line-edge direction. 
Experimental results show that the proposed method can eliminate noise while preserving contour of line-edges.", "fno": "04775703", "keywords": [ "Computational Geometry", "Distortion", "Edge Detection", "Image Enhancement", "Impulse Noise", "Impulse Noise Distortion", "Line Edge Preservation Enhancement", "Line Edge Detection Mask", "Anisotropic Magnetoresistance", "Anisotropic Diffusion", "Gradient", "Line Edge", "Speckle Noise" ], "authors": [ { "affiliation": "Department of Electronics and Computer Eng., Chonnam National University, 500-757 Gwangju, Korea, iamtina@paran.com", "fullName": "HyeSuk Kim", "givenName": "HyeSuk", "surname": "Kim", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Electronics and Computer Eng., Chonnam National University, 500-757 Gwangju, Korea", "fullName": "GiHong Kim", "givenName": "GiHong", "surname": "Kim", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Electronics and Computer Eng., Chonnam National University, 500-757 Gwangju, Korea, gslee@chonnam.chonnam.ac.kr", "fullName": "GueeSang Lee", "givenName": "GueeSang", "surname": "Lee", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Electronics and Computer Eng., Chonnam National University, 500-757 Gwangju, Korea; Electronics and Telecommunications Research Institute, Daejon, Korea", "fullName": "JuneYoung Chang", "givenName": "JuneYoung", "surname": "Chang", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Electronics and Computer Eng., Chonnam National University, 500-757 Gwangju, Korea; Electronics and Telecommunications Research Institute, Daejon, Korea", "fullName": "HanJin Cho", "givenName": "HanJin", "surname": "Cho", "__typename": "ArticleAuthorType" } ], "idPrefix": "isspit", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-12-01T00:00:00", "pubType": "proceedings", "pages": "", "year": "2008", "issn": "2162-7843", "isbn": 
"978-1-4244-3554-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "04775702", "articleId": "12OmNwbukcU", "__typename": "AdjacentArticleType" }, "next": { "fno": "04775704", "articleId": "12OmNB8kHRX", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/alpit/2008/3273/0/3273a198", "title": "Speckle Reducing Anisotropic Diffusion Based on Directions of Gradient", "doi": null, "abstractUrl": "/proceedings-article/alpit/2008/3273a198/12OmNA0MZ73", "parentPublication": { "id": "proceedings/alpit/2008/3273/0", "title": "Advanced Language Processing and Web Information Technology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdh/2014/4284/0/4284a111", "title": "Adaptive Anisotropic Diffusion for Image Denoising Based on Structure Tensor", "doi": null, "abstractUrl": "/proceedings-article/icdh/2014/4284a111/12OmNAS9zBg", "parentPublication": { "id": "proceedings/icdh/2014/4284/0", "title": "2014 5th International Conference on Digital Home (ICDH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/citworkshops/2008/3242/0/3242a396", "title": "Anisotropic Diffusion Transform Based on Directions of Edges", "doi": null, "abstractUrl": "/proceedings-article/citworkshops/2008/3242a396/12OmNAs2tql", "parentPublication": { "id": "proceedings/citworkshops/2008/3242/0", "title": "Computer and Information Technology, IEEE 8th International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/snpd/2007/2909/1/290910191", "title": "An Improved Anisotropic Diffusion PDE for Noise Removal and Edge Preservation", "doi": null, "abstractUrl": "/proceedings-article/snpd/2007/290910191/12OmNB8TUc8", "parentPublication": { "id": 
"proceedings/snpd/2007/2909/1", "title": "Eighth ACIS International Conference on Software Engineering, Artificial Intelligence, Networking, and Parallel/Distributed Computing (SNPD 2007)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvgip/2008/3476/0/3476a033", "title": "Edge Detectors Based Anisotropic Diffusion for Enhancement of Digital Images", "doi": null, "abstractUrl": "/proceedings-article/icvgip/2008/3476a033/12OmNBTs7I5", "parentPublication": { "id": "proceedings/icvgip/2008/3476/0", "title": "Computer Vision, Graphics &amp; Image Processing, Indian Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cis/2012/4896/0/4896a284", "title": "An Adaptive Image Denoising Model Based on Nonlocal Diffusion Tensor", "doi": null, "abstractUrl": "/proceedings-article/cis/2012/4896a284/12OmNwKoZeh", "parentPublication": { "id": "proceedings/cis/2012/4896/0", "title": "2012 Eighth International Conference on Computational Intelligence and Security", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ccie/2010/4026/2/4026b111", "title": "Digital Image Magnification with Improved Anisotropic Diffusion", "doi": null, "abstractUrl": "/proceedings-article/ccie/2010/4026b111/12OmNyoSbiK", "parentPublication": { "id": "proceedings/ccie/2010/4026/2", "title": "Computing, Control and Industrial Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visapp/2014/8133/1/07294790", "title": "Oriented half Gaussian kernels and anisotropic diffusion", "doi": null, "abstractUrl": "/proceedings-article/visapp/2014/07294790/12OmNyrIatl", "parentPublication": { "id": "proceedings/visapp/2014/8133/1", "title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/icgec/2010/4281/0/4281a703", "title": "Low-contrast Edge Enhancing Anisotropic Diffusion for Speckle Reduction", "doi": null, "abstractUrl": "/proceedings-article/icgec/2010/4281a703/12OmNzhELor", "parentPublication": { "id": "proceedings/icgec/2010/4281/0", "title": "Genetic and Evolutionary Computing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itca/2020/0378/0/037800a401", "title": "A Fingerprint Image Enhancement Method Based on Anisotropic Diffusion and Shock Filtering", "doi": null, "abstractUrl": "/proceedings-article/itca/2020/037800a401/1tpBlLS2Kxq", "parentPublication": { "id": "proceedings/itca/2020/0378/0", "title": "2020 2nd International Conference on Information Technology and Computer Application (ITCA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAIMObt", "title": "Proceedings 11th International Conference on Tools with Artificial Intelligence", "acronym": "ictai", "groupId": "1000763", "volume": "0", "displayVolume": "0", "year": "1999", "__typename": "ProceedingType" }, "article": { "id": "12OmNC3XhzH", "doi": "10.1109/TAI.1999.809789", "title": "Removing Node and Edge Overlapping in Graph Layouts by A Modified EGENET Solver", "normalizedTitle": "Removing Node and Edge Overlapping in Graph Layouts by A Modified EGENET Solver", "abstract": "Graph layout problems such as node and edge overlapping occur widely in many industrial computer-aided design applications. Usually, these problems are handled in an ad hoc manner by some specially designed algorithms. GENET and its extended model EGENET are local search methods used to solve constraint satisfaction problems such as the car-sequencing problems efficiently. Both models use the min-conflict heuristic to update every finite-domain variable for finding local minima, and then apply heuristic learning rule(s) to escape the local minima not representing solution(s). In the past, few researchers have ever considered to apply any local search method like the EGENET approach to solve graph layout problems. In this paper, we consider how to modify the original EGENET model for solving the graph layout problems formulated as continuous constrained optimization problems. The empirical evaluation of different approaches on the graph layout problems demonstrated some advantages of our modified EGENET approach, which requires further investigation. 
More importantly, this interesting proposal opens up numerous opportunities for exploring the other possible ways to modify the original EGENET model, or using the other local search methods to solve these graph layout problems.", "abstracts": [ { "abstractType": "Regular", "content": "Graph layout problems such as node and edge overlapping occur widely in many industrial computer-aided design applications. Usually, these problems are handled in an ad hoc manner by some specially designed algorithms. GENET and its extended model EGENET are local search methods used to solve constraint satisfaction problems such as the car-sequencing problems efficiently. Both models use the min-conflict heuristic to update every finite-domain variable for finding local minima, and then apply heuristic learning rule(s) to escape the local minima not representing solution(s). In the past, few researchers have ever considered to apply any local search method like the EGENET approach to solve graph layout problems. In this paper, we consider how to modify the original EGENET model for solving the graph layout problems formulated as continuous constrained optimization problems. The empirical evaluation of different approaches on the graph layout problems demonstrated some advantages of our modified EGENET approach, which requires further investigation. More importantly, this interesting proposal opens up numerous opportunities for exploring the other possible ways to modify the original EGENET model, or using the other local search methods to solve these graph layout problems.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Graph layout problems such as node and edge overlapping occur widely in many industrial computer-aided design applications. Usually, these problems are handled in an ad hoc manner by some specially designed algorithms. 
GENET and its extended model EGENET are local search methods used to solve constraint satisfaction problems such as the car-sequencing problems efficiently. Both models use the min-conflict heuristic to update every finite-domain variable for finding local minima, and then apply heuristic learning rule(s) to escape the local minima not representing solution(s). In the past, few researchers have ever considered to apply any local search method like the EGENET approach to solve graph layout problems. In this paper, we consider how to modify the original EGENET model for solving the graph layout problems formulated as continuous constrained optimization problems. The empirical evaluation of different approaches on the graph layout problems demonstrated some advantages of our modified EGENET approach, which requires further investigation. More importantly, this interesting proposal opens up numerous opportunities for exploring the other possible ways to modify the original EGENET model, or using the other local search methods to solve these graph layout problems.", "fno": "04560218", "keywords": [ "Graph Layout Problems", "Local Search Methods" ], "authors": [ { "affiliation": "National University of Singapore", "fullName": "Vincent Tam", "givenName": "Vincent", "surname": "Tam", "__typename": "ArticleAuthorType" } ], "idPrefix": "ictai", "isOpenAccess": false, "showRecommendedArticles": false, "showBuyMe": true, "hasPdf": true, "pubDate": "1999-11-01T00:00:00", "pubType": "proceedings", "pages": "218", "year": "1999", "issn": "1082-3409", "isbn": "0-7695-0456-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "04560210", "articleId": "12OmNxd4twK", "__typename": "AdjacentArticleType" }, "next": { "fno": "04560229", "articleId": "12OmNzahbXJ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "12OmNrIJqqi", "title": "Instrumentation, Measurement, Computer, Communication and Control, International Conference on", "acronym": "imccc", "groupId": "1800575", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNviZlq5", "doi": "10.1109/IMCCC.2011.238", "title": "A Compound Control System of Axial Moving Mass and Aerodynamic Force for Mass Moment Missile", "normalizedTitle": "A Compound Control System of Axial Moving Mass and Aerodynamic Force for Mass Moment Missile", "abstract": "For the mass moment missile in-atmosphere, the principle of moving mass control has been studied with the purpose of finding the major factors that influence attitude motions. On this base, a layout of moving mass and aerodynamic rudder, along with a relative compound control system has been given. The feature of the control system is that the attitude control depends on the joint action of axial moving mass and aerodynamic force. Therefore the coordination control law of more moving masses is not necessary, the coupling between different channels is decreased, the layout design and the servo system design are simplified. Computer simulation has shown the compound control system is suitable for the typical trajectory flying.", "abstracts": [ { "abstractType": "Regular", "content": "For the mass moment missile in-atmosphere, the principle of moving mass control has been studied with the purpose of finding the major factors that influence attitude motions. On this base, a layout of moving mass and aerodynamic rudder, along with a relative compound control system has been given. The feature of the control system is that the attitude control depends on the joint action of axial moving mass and aerodynamic force. Therefore the coordination control law of more moving masses is not necessary, the coupling between different channels is decreased, the layout design and the servo system design are simplified. 
Computer simulation has shown the compound control system is suitable for the typical trajectory flying.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "For the mass moment missile in-atmosphere, the principle of moving mass control has been studied with the purpose of finding the major factors that influence attitude motions. On this base, a layout of moving mass and aerodynamic rudder, along with a relative compound control system has been given. The feature of the control system is that the attitude control depends on the joint action of axial moving mass and aerodynamic force. Therefore the coordination control law of more moving masses is not necessary, the coupling between different channels is decreased, the layout design and the servo system design are simplified. Computer simulation has shown the compound control system is suitable for the typical trajectory flying.", "fno": "4519a945", "keywords": [ "Mass Moment Control", "Moving Mass Layout", "Compound Control Of Moving Mass And Aerodynamic Force" ], "authors": [ { "affiliation": null, "fullName": "Yuan Qianchen", "givenName": "Yuan", "surname": "Qianchen", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Zhang Qingzhen", "givenName": "Zhang", "surname": "Qingzhen", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Zhang Huiping", "givenName": "Zhang", "surname": "Huiping", "__typename": "ArticleAuthorType" } ], "idPrefix": "imccc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-10-01T00:00:00", "pubType": "proceedings", "pages": "945-948", "year": "2011", "issn": null, "isbn": "978-0-7695-4519-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4519a941", "articleId": "12OmNzYwc48", "__typename": "AdjacentArticleType" }, "next": { "fno": "4519a949", "articleId": "12OmNvFYQKa", "__typename": 
"AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icmtma/2011/4296/2/4296c116", "title": "Experimental Investigation of the Plasma Aerodynamic Actuation Generated by Nanosecond-pulse Sliding Discharge", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2011/4296c116/12OmNANkohp", "parentPublication": { "id": "proceedings/icmtma/2011/4296/2", "title": "2011 Third International Conference on Measuring Technology and Mechatronics Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ihmsc/2010/4151/1/4151a073", "title": "Aerodynamic Surfaces Control Allocation for RLV Reentry", "doi": null, "abstractUrl": "/proceedings-article/ihmsc/2010/4151a073/12OmNBOll8W", "parentPublication": { "id": "proceedings/ihmsc/2010/4151/1", "title": "Intelligent Human-Machine Systems and Cybernetics, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccms/2010/3941/4/3941d123", "title": "Investigation of Active Flow Control on Aerodynamic Performance of HALE UAV Airfoil", "doi": null, "abstractUrl": "/proceedings-article/iccms/2010/3941d123/12OmNwpoFEX", "parentPublication": { "id": "proceedings/iccms/2010/3941/4", "title": "Computer Modeling and Simulation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aiccsa/2014/7100/0/07073224", "title": "Feature model for modeling compound SOA design patterns", "doi": null, "abstractUrl": "/proceedings-article/aiccsa/2014/07073224/12OmNx4yvsE", "parentPublication": { "id": "proceedings/aiccsa/2014/7100/0", "title": "2014 IEEE/ACS 11th International Conference on Computer Systems and Applications (AICCSA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icece/2010/4031/0/4031d597", "title": "Characteristics of 
Plasma Aerodynamic Actuation Generated by Polyphase Dielectric Barrier Discharge", "doi": null, "abstractUrl": "/proceedings-article/icece/2010/4031d597/12OmNx5Yv4T", "parentPublication": { "id": "proceedings/icece/2010/4031/0", "title": "Electrical and Control Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2011/4296/1/4296a757", "title": "Compound Fuzzy PID Level Control System Based on WinCC and MATLAB", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2011/4296a757/12OmNx7XGYY", "parentPublication": { "id": "proceedings/icmtma/2011/4296/1", "title": "2011 Third International Conference on Measuring Technology and Mechatronics Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2008/3357/1/3357a337", "title": "Design of Autopilot for Aerodynamic/Reaction-Jet Multiple Control Missile", "doi": null, "abstractUrl": "/proceedings-article/icicta/2008/3357a337/12OmNxzMnOY", "parentPublication": { "id": "proceedings/icicta/2008/3357/1", "title": "Intelligent Computation Technology and Automation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2011/4296/1/4296b018", "title": "Design of Vertical Launch Control System for Antiaircraft Missile Using Optical Control", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2011/4296b018/12OmNy2rS4I", "parentPublication": { "id": "proceedings/icmtma/2011/4296/1", "title": "2011 Third International Conference on Measuring Technology and Mechatronics Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2010/4077/3/4077e161", "title": "Research on the Noise Barrier Height Change of the Monoline Viaduct Affecting the Aerodynamic Characteristic of High Speed Train", "doi": null, "abstractUrl": 
"/proceedings-article/icicta/2010/4077e161/12OmNzSQdmc", "parentPublication": { "id": "proceedings/icicta/2010/4077/3", "title": "Intelligent Computation Technology and Automation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmeas/2021/9768/0/976800a032", "title": "Attitude Control of the Low Earth Orbit Satellite with Moving Masses under Strong Aerodynamic Disturbance", "doi": null, "abstractUrl": "/proceedings-article/icmeas/2021/976800a032/1zuuPFmAcsE", "parentPublication": { "id": "proceedings/icmeas/2021/9768/0", "title": "2021 7th International Conference on Mechanical Engineering and Automation Science (ICMEAS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwbcJ3u", "title": "Computing, Control and Industrial Engineering, International Conference on", "acronym": "ccie", "groupId": "1800073", "volume": "2", "displayVolume": "2", "year": "2010", "__typename": "ProceedingType" }, "article": { "id": "12OmNyoSbiK", "doi": "10.1109/CCIE.2010.146", "title": "Digital Image Magnification with Improved Anisotropic Diffusion", "normalizedTitle": "Digital Image Magnification with Improved Anisotropic Diffusion", "abstract": "The important issue of digital image magnification is to remove zigzagging artifacts and to restrain blurring and other artificial artifacts. In this paper a method with improved anisotropic diffusion is proposed to eliminate these artifacts. This kind of energy dissipation diffuses differently on two sides of image edges. The slope of edges is enlarged by this energy diffusion process, which restrains blurring on edges. Meanwhile, diffusion along image contours smoothes edges to eliminate the zigzagging artifacts. Numerical experiments on real images show that this method can eliminate efficiently zigzagging and edge blurring artifacts.", "abstracts": [ { "abstractType": "Regular", "content": "The important issue of digital image magnification is to remove zigzagging artifacts and to restrain blurring and other artificial artifacts. In this paper a method with improved anisotropic diffusion is proposed to eliminate these artifacts. This kind of energy dissipation diffuses differently on two sides of image edges. The slope of edges is enlarged by this energy diffusion process, which restrains blurring on edges. Meanwhile, diffusion along image contours smoothes edges to eliminate the zigzagging artifacts. 
Numerical experiments on real images show that this method can eliminate efficiently zigzagging and edge blurring artifacts.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The important issue of digital image magnification is to remove zigzagging artifacts and to restrain blurring and other artificial artifacts. In this paper a method with improved anisotropic diffusion is proposed to eliminate these artifacts. This kind of energy dissipation diffuses differently on two sides of image edges. The slope of edges is enlarged by this energy diffusion process, which restrains blurring on edges. Meanwhile, diffusion along image contours smoothes edges to eliminate the zigzagging artifacts. Numerical experiments on real images show that this method can eliminate efficiently zigzagging and edge blurring artifacts.", "fno": "4026b111", "keywords": [ "Image Magnification", "Anisotropic Diffusion", "Regularization" ], "authors": [ { "affiliation": null, "fullName": "Yi-An Chen", "givenName": "Yi-An", "surname": "Chen", "__typename": "ArticleAuthorType" } ], "idPrefix": "ccie", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2010-06-01T00:00:00", "pubType": "proceedings", "pages": "111-113", "year": "2010", "issn": null, "isbn": "978-0-7695-4026-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4026b107", "articleId": "12OmNwcl7Ks", "__typename": "AdjacentArticleType" }, "next": { "fno": "4026b114", "articleId": "12OmNAndivB", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/alpit/2008/3273/0/3273a198", "title": "Speckle Reducing Anisotropic Diffusion Based on Directions of Gradient", "doi": null, "abstractUrl": "/proceedings-article/alpit/2008/3273a198/12OmNA0MZ73", "parentPublication": { "id": "proceedings/alpit/2008/3273/0", "title": "Advanced 
Language Processing and Web Information Technology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2010/3962/2/3962d091", "title": "Research of Low SNR Crack Denoising Based on Kernel Anisotropic Diffusion", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2010/3962d091/12OmNAZOJYb", "parentPublication": { "id": "proceedings/icmtma/2010/3962/2", "title": "2010 International Conference on Measuring Technology and Mechatronics Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/citworkshops/2008/3242/0/3242a396", "title": "Anisotropic Diffusion Transform Based on Directions of Edges", "doi": null, "abstractUrl": "/proceedings-article/citworkshops/2008/3242a396/12OmNAs2tql", "parentPublication": { "id": "proceedings/citworkshops/2008/3242/0", "title": "Computer and Information Technology, IEEE 8th International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbms/2008/3165/0/3165a029", "title": "Noise Filtering Using Edge-Driven Adaptive Anisotropic Diffusion", "doi": null, "abstractUrl": "/proceedings-article/cbms/2008/3165a029/12OmNB1wkMx", "parentPublication": { "id": "proceedings/cbms/2008/3165/0", "title": "2008 21st IEEE International Symposium on Computer-Based Medical Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/snpd/2007/2909/1/290910191", "title": "An Improved Anisotropic Diffusion PDE for Noise Removal and Edge Preservation", "doi": null, "abstractUrl": "/proceedings-article/snpd/2007/290910191/12OmNB8TUc8", "parentPublication": { "id": "proceedings/snpd/2007/2909/1", "title": "Eighth ACIS International Conference on Software Engineering, Artificial Intelligence, Networking, and Parallel/Distributed Computing (SNPD 2007)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/isspit/2008/3554/0/04775703", "title": "Anisotropic Diffusion for Preservation of Line-edges", "doi": null, "abstractUrl": "/proceedings-article/isspit/2008/04775703/12OmNBA9oBV", "parentPublication": { "id": "proceedings/isspit/2008/3554/0", "title": "2008 8th IEEE International Symposium on Signal Processing and Information Technology. ISSPIT 2008", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvgip/2008/3476/0/3476a033", "title": "Edge Detectors Based Anisotropic Diffusion for Enhancement of Digital Images", "doi": null, "abstractUrl": "/proceedings-article/icvgip/2008/3476a033/12OmNBTs7I5", "parentPublication": { "id": "proceedings/icvgip/2008/3476/0", "title": "Computer Vision, Graphics &amp; Image Processing, Indian Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icip/1997/8183/3/81833352", "title": "Using mean field annealing to solve anisotropic diffusion problems", "doi": null, "abstractUrl": "/proceedings-article/icip/1997/81833352/12OmNCesr9M", "parentPublication": { "id": "proceedings/icip/1997/8183/3", "title": "Proceedings of International Conference on Image Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2012/4683/0/4683a383", "title": "Regularized Gradient Kernel Anisotropic Diffusion for Better Image Filtering", "doi": null, "abstractUrl": "/proceedings-article/crv/2012/4683a383/12OmNwCsdAi", "parentPublication": { "id": "proceedings/crv/2012/4683/0", "title": "2012 Ninth Conference on Computer and Robot Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icgec/2010/4281/0/4281a703", "title": "Low-contrast Edge Enhancing Anisotropic Diffusion for Speckle Reduction", "doi": null, "abstractUrl": 
"/proceedings-article/icgec/2010/4281a703/12OmNzhELor", "parentPublication": { "id": "proceedings/icgec/2010/4281/0", "title": "Genetic and Evolutionary Computing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAY79oC", "title": "Computer Vision, Graphics & Image Processing, Indian Conference on", "acronym": "icvgip", "groupId": "1800020", "volume": "0", "displayVolume": "0", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNzBOi2h", "doi": "10.1109/ICVGIP.2008.37", "title": "Anisotropic Diffusion and Segmentation of Colored Flowers", "normalizedTitle": "Anisotropic Diffusion and Segmentation of Colored Flowers", "abstract": "This paper presents a new anisotropic diffusion method for multi-valued images. The colored flowers are segmented from the background. In my diffusion technique the edge stopping function is computed as inversely proportional to the weighted sum of gradients of color components. The value of edge stopping function tends to zero as the weighted sum of three color gradients becomes large. The other algorithm further explores an existing diffusion technique based on structure tensors. The multivalued image diffuses using a system of coupled differential equations in the direction of minimal change. The paper presents the results of anisotropic diffusion and segmentation of colored flowers. The new method can also be applied for anisotropic diffusion of other multivalued images.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents a new anisotropic diffusion method for multi-valued images. The colored flowers are segmented from the background. In my diffusion technique the edge stopping function is computed as inversely proportional to the weighted sum of gradients of color components. The value of edge stopping function tends to zero as the weighted sum of three color gradients becomes large. The other algorithm further explores an existing diffusion technique based on structure tensors. The multivalued image diffuses using a system of coupled differential equations in the direction of minimal change. 
The paper presents the results of anisotropic diffusion and segmentation of colored flowers. The new method can also be applied for anisotropic diffusion of other multivalued images.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents a new anisotropic diffusion method for multi-valued images. The colored flowers are segmented from the background. In my diffusion technique the edge stopping function is computed as inversely proportional to the weighted sum of gradients of color components. The value of edge stopping function tends to zero as the weighted sum of three color gradients becomes large. The other algorithm further explores an existing diffusion technique based on structure tensors. The multivalued image diffuses using a system of coupled differential equations in the direction of minimal change. The paper presents the results of anisotropic diffusion and segmentation of colored flowers. The new method can also be applied for anisotropic diffusion of other multivalued images.", "fno": "3476a599", "keywords": [ "Anisotropic Diffusion", "Segmentation", "Edge Detection", "Color" ], "authors": [ { "affiliation": null, "fullName": "Shoma Chatterjee", "givenName": "Shoma", "surname": "Chatterjee", "__typename": "ArticleAuthorType" } ], "idPrefix": "icvgip", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-12-01T00:00:00", "pubType": "proceedings", "pages": "599-605", "year": "2008", "issn": null, "isbn": "978-0-7695-3476-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3476a591", "articleId": "12OmNrJAeer", "__typename": "AdjacentArticleType" }, "next": { "fno": "3476a614", "articleId": "12OmNCdBDMf", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/alpit/2008/3273/0/3273a198", "title": "Speckle Reducing Anisotropic 
Diffusion Based on Directions of Gradient", "doi": null, "abstractUrl": "/proceedings-article/alpit/2008/3273a198/12OmNA0MZ73", "parentPublication": { "id": "proceedings/alpit/2008/3273/0", "title": "Advanced Language Processing and Web Information Technology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isise/2008/3494/2/3494b591", "title": "Anisotropic Diffusion with Morphological Reconstruction and Automatic Seeded Region Growing for Color Image Segmentation", "doi": null, "abstractUrl": "/proceedings-article/isise/2008/3494b591/12OmNAPSMm8", "parentPublication": { "id": "proceedings/isise/2008/3494/2", "title": "2008 International Symposium on Information Science and Engineering (ISISE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/citworkshops/2008/3242/0/3242a396", "title": "Anisotropic Diffusion Transform Based on Directions of Edges", "doi": null, "abstractUrl": "/proceedings-article/citworkshops/2008/3242a396/12OmNAs2tql", "parentPublication": { "id": "proceedings/citworkshops/2008/3242/0", "title": "Computer and Information Technology, IEEE 8th International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iitsi/2010/4020/0/4020a789", "title": "Volume Rendering Effect Analysis with Image Preprocessing Technology Based on Three Dimension Anisotropic Diffusion", "doi": null, "abstractUrl": "/proceedings-article/iitsi/2010/4020a789/12OmNBEpnBT", "parentPublication": { "id": "proceedings/iitsi/2010/4020/0", "title": "Intelligent Information Technology and Security Informatics, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icip/1997/8183/1/81831263", "title": "Robust anisotropic diffusion and sharpening of scalar and vector images", "doi": null, "abstractUrl": 
"/proceedings-article/icip/1997/81831263/12OmNviZlnF", "parentPublication": { "id": "proceedings/icip/1997/8183/1", "title": "Image Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ccie/2010/4026/2/4026b111", "title": "Digital Image Magnification with Improved Anisotropic Diffusion", "doi": null, "abstractUrl": "/proceedings-article/ccie/2010/4026b111/12OmNyoSbiK", "parentPublication": { "id": "proceedings/ccie/2010/4026/2", "title": "Computing, Control and Industrial Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visapp/2014/8133/1/07294790", "title": "Oriented half Gaussian kernels and anisotropic diffusion", "doi": null, "abstractUrl": "/proceedings-article/visapp/2014/07294790/12OmNyrIatl", "parentPublication": { "id": "proceedings/visapp/2014/8133/1", "title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icgec/2010/4281/0/4281a703", "title": "Low-contrast Edge Enhancing Anisotropic Diffusion for Speckle Reduction", "doi": null, "abstractUrl": "/proceedings-article/icgec/2010/4281a703/12OmNzhELor", "parentPublication": { "id": "proceedings/icgec/2010/4281/0", "title": "Genetic and Evolutionary Computing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iih-msp/2009/3762/0/3762b201", "title": "Denoising by Anisotropic Diffusion in ICA Subspace", "doi": null, "abstractUrl": "/proceedings-article/iih-msp/2009/3762b201/12OmNzmLxNj", "parentPublication": { "id": "proceedings/iih-msp/2009/3762/0", "title": "Intelligent Information Hiding and Multimedia Signal Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "trans/tp/1999/01/i0042", "title": "Adaptive Nonlocal Filtering: A Fast Alternative to Anisotropic Diffusion for Image Enhancement", "doi": null, "abstractUrl": "/journal/tp/1999/01/i0042/13rRUwbJD5T", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNs4S8vJ", "title": "Environmental Science and Information Application Technology, International Conference on", "acronym": "esiat", "groupId": "1002836", "volume": "3", "displayVolume": "3", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNApLGra", "doi": "10.1109/ESIAT.2009.48", "title": "A Simulation of Gas Migration in Heterogeneous Goaf of Fully Mechanized Coal Caving Mining Face Based on Multi-components LBM", "normalizedTitle": "A Simulation of Gas Migration in Heterogeneous Goaf of Fully Mechanized Coal Caving Mining Face Based on Multi-components LBM", "abstract": "Gas migration in a goaf is the cause of upper corner gas over limit which greatly influences coal mining safety. To explore gas migration law, this paper provides a Lattice Boltzmann (LB) simulation of gas migration in a heterogeneous goaf of a fully mechanized coal caving mining face. The goaf of a fully mechanized coal caving mining face is an area which fills with heterogeneous porous media and gas migration in it is complicated seepage movement which contains laminar, transitional and turbulent flow. Under the amended Brinkman-Forchheimer-Darcy law, this paper provides a control system which reflects the characters of gas migration law in a heterogeneous goaf of a fully mechanized coal caving mining face. Multi-components LBM is used to solve the complicated control system. Two Lattice Boltzmann Equations (LBEs) are constructed to simulate the atmosphere and gas seepage velocity field respectively. By the evolution of these two LB models, simulation results are gained. The simulation can produce many data such as speed, pressure and concentration in every time and space, thus we can get gas migration law directly. A case study showed: This method can combine time, space and system action together. 
It can simulate and analyze acutely the situation of gas migration in direct condition and provide an alterable method to reveal and control gas migration in an underground coal mine.", "abstracts": [ { "abstractType": "Regular", "content": "Gas migration in a goaf is the cause of upper corner gas over limit which greatly influences coal mining safety. To explore gas migration law, this paper provides a Lattice Boltzmann (LB) simulation of gas migration in a heterogeneous goaf of a fully mechanized coal caving mining face. The goaf of a fully mechanized coal caving mining face is an area which fills with heterogeneous porous media and gas migration in it is complicated seepage movement which contains laminar, transitional and turbulent flow. Under the amended Brinkman-Forchheimer-Darcy law, this paper provides a control system which reflects the characters of gas migration law in a heterogeneous goaf of a fully mechanized coal caving mining face. Multi-components LBM is used to solve the complicated control system. Two Lattice Boltzmann Equations (LBEs) are constructed to simulate the atmosphere and gas seepage velocity field respectively. By the evolution of these two LB models, simulation results are gained. The simulation can produce many data such as speed, pressure and concentration in every time and space, thus we can get gas migration law directly. A case study showed: This method can combine time, space and system action together. It can simulate and analyze acutely the situation of gas migration in direct condition and provide an alterable method to reveal and control gas migration in an underground coal mine.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Gas migration in a goaf is the cause of upper corner gas over limit which greatly influences coal mining safety. To explore gas migration law, this paper provides a Lattice Boltzmann (LB) simulation of gas migration in a heterogeneous goaf of a fully mechanized coal caving mining face. 
The goaf of a fully mechanized coal caving mining face is an area which fills with heterogeneous porous media and gas migration in it is complicated seepage movement which contains laminar, transitional and turbulent flow. Under the amended Brinkman-Forchheimer-Darcy law, this paper provides a control system which reflects the characters of gas migration law in a heterogeneous goaf of a fully mechanized coal caving mining face. Multi-components LBM is used to solve the complicated control system. Two Lattice Boltzmann Equations (LBEs) are constructed to simulate the atmosphere and gas seepage velocity field respectively. By the evolution of these two LB models, simulation results are gained. The simulation can produce many data such as speed, pressure and concentration in every time and space, thus we can get gas migration law directly. A case study showed: This method can combine time, space and system action together. It can simulate and analyze acutely the situation of gas migration in direct condition and provide an alterable method to reveal and control gas migration in an underground coal mine.", "fno": "3682c531", "keywords": [ "Computer Simulation", "Gas Migration", "Multi Components Flow", "Lattice Boltzmann Method", "Mine Safety" ], "authors": [ { "affiliation": null, "fullName": "Qiu-qin Lu", "givenName": "Qiu-qin", "surname": "Lu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Guang-qiu Huang", "givenName": "Guang-qiu", "surname": "Huang", "__typename": "ArticleAuthorType" } ], "idPrefix": "esiat", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-07-01T00:00:00", "pubType": "proceedings", "pages": "531-534", "year": "2009", "issn": null, "isbn": "978-0-7695-3682-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3682c527", "articleId": "12OmNzayNe8", "__typename": "AdjacentArticleType" }, 
"next": { "fno": "3682c535", "articleId": "12OmNAR1aUk", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/esiat/2009/3682/3/3682c535", "title": "A Numeric Simulation of Gas Migration in a Fully Mechanized Coal Caving Stope Based on Lattice Boltzmann Method", "doi": null, "abstractUrl": "/proceedings-article/esiat/2009/3682c535/12OmNAR1aUk", "parentPublication": { "id": "proceedings/esiat/2009/3682/3", "title": "Environmental Science and Information Application Technology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iscid/2009/3865/2/3865b239", "title": "The Mechanism Study of Serious Gas Explosion Accident at Coal Face", "doi": null, "abstractUrl": "/proceedings-article/iscid/2009/3865b239/12OmNvTTcbg", "parentPublication": { "id": "proceedings/iscid/2009/3865/2", "title": "Computational Intelligence and Design, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icic/2009/3634/4/3634d308", "title": "Simulation Model of Gas Migration and Hindering in Underground Tunnel Based on LBM", "doi": null, "abstractUrl": "/proceedings-article/icic/2009/3634d308/12OmNwDj16i", "parentPublication": { "id": "proceedings/icic/2009/3634/4", "title": "2009 Second International Conference on Information and Computing Science", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cinc/2009/3645/1/3645a241", "title": "Application of Grey Relation Clustering and CGNN in Gas Concentration Prediction in Top Corner of Coal Mine", "doi": null, "abstractUrl": "/proceedings-article/cinc/2009/3645a241/12OmNwFicWg", "parentPublication": { "id": "proceedings/cinc/2009/3645/1", "title": "Computational Intelligence and Natural Computing, International Conference on", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/iitaw/2009/3860/0/3860a440", "title": "Gas Emission Rate Prediction in Fully-Mechanized Excavated Faces Based on Support Vector Machine", "doi": null, "abstractUrl": "/proceedings-article/iitaw/2009/3860a440/12OmNweTvQX", "parentPublication": { "id": "proceedings/iitaw/2009/3860/0", "title": "2009 Third International Symposium on Intelligent Information Technology Application Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isdea/2013/4893/0/06456405", "title": "Detecting of Coal Gas Weak Signals Using Lyapunov Exponent under Strong Noise Background", "doi": null, "abstractUrl": "/proceedings-article/isdea/2013/06456405/12OmNyv7m6p", "parentPublication": { "id": "proceedings/isdea/2013/4893/0", "title": "2013 Third International Conference on Intelligent System Design and Engineering Applications (ISDEA 2013)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2014/6636/0/6636a392", "title": "Numerical Simulation of Gas Migration Regularity under Different Periodic Step Distance in Goaf", "doi": null, "abstractUrl": "/proceedings-article/icicta/2014/6636a392/12OmNzYNN5y", "parentPublication": { "id": "proceedings/icicta/2014/6636/0", "title": "2014 7th International Conference on Intelligent Computation Technology and Automation (ICICTA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wmso/2008/3484/0/3484a384", "title": "Numerical Simulation of Gas Drainage in Low-Permeability Fissured Coal", "doi": null, "abstractUrl": "/proceedings-article/wmso/2008/3484a384/12OmNznCl01", "parentPublication": { "id": "proceedings/wmso/2008/3484/0", "title": "Modelling, Simulation and Optimization, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/icuems/2020/8832/0/09151469", "title": "Study on the dangerous area division of strong mine pressure and characteristics of mine pressure in small pillar fully mechanized caving face", "doi": null, "abstractUrl": "/proceedings-article/icuems/2020/09151469/1lRlVn86jiE", "parentPublication": { "id": "proceedings/icuems/2020/8832/0", "title": "2020 International Conference on Urban Engineering and Management Science (ICUEMS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2020/0497/0/049700a053", "title": "Research on Construction Method of Fully Mechanized Coal Mining Equipment Workspace Based on Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2020/049700a053/1vg7IGUQ4o0", "parentPublication": { "id": "proceedings/icvrv/2020/0497/0", "title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNvRU0lM", "title": "2008 International Symposium on Information Science and Engieering", "acronym": "isise", "groupId": "1002561", "volume": "1", "displayVolume": "1", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNApu5CQ", "doi": "10.1109/ISISE.2008.182", "title": "High Performance Lattice Boltzmann Algorithms for Fluid Flows", "normalizedTitle": "High Performance Lattice Boltzmann Algorithms for Fluid Flows", "abstract": "While the lattice Boltzmann method (LBM) has attracted much attention in the area of CFD in recent years, it has also been recognized that it is both computationally demanding and memory intensive. Extensive studies on improving the performance of LBM have been carried out. In this work, various efficient implementation algorithms of LBM are investigated in terms of computational performance and memory consumptions. More precisely, we consider four types high performance LB algorithms: efficient grid refinement, parallel, cache optimization and GPU-based algorithms.", "abstracts": [ { "abstractType": "Regular", "content": "While the lattice Boltzmann method (LBM) has attracted much attention in the area of CFD in recent years, it has also been recognized that it is both computationally demanding and memory intensive. Extensive studies on improving the performance of LBM have been carried out. In this work, various efficient implementation algorithms of LBM are investigated in terms of computational performance and memory consumptions. More precisely, we consider four types high performance LB algorithms: efficient grid refinement, parallel, cache optimization and GPU-based algorithms.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "While the lattice Boltzmann method (LBM) has attracted much attention in the area of CFD in recent years, it has also been recognized that it is both computationally demanding and memory intensive. 
Extensive studies on improving the performance of LBM have been carried out. In this work, various efficient implementation algorithms of LBM are investigated in terms of computational performance and memory consumptions. More precisely, we consider four types high performance LB algorithms: efficient grid refinement, parallel, cache optimization and GPU-based algorithms.", "fno": "3494a033", "keywords": [ "Lattice Boltzmann Method", "Fluid Flow", "High Performance Algorithm", "Performance Analysis" ], "authors": [ { "affiliation": null, "fullName": "Weibin Guo", "givenName": "Weibin", "surname": "Guo", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Cheqing Jin", "givenName": "Cheqing", "surname": "Jin", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jianhua Li", "givenName": "Jianhua", "surname": "Li", "__typename": "ArticleAuthorType" } ], "idPrefix": "isise", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-12-01T00:00:00", "pubType": "proceedings", "pages": "33-37", "year": "2008", "issn": null, "isbn": "978-0-7695-3494-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3494a029", "articleId": "12OmNzCF4YP", "__typename": "AdjacentArticleType" }, "next": { "fno": "3494a038", "articleId": "12OmNyq0zEB", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icpp/2009/3802/0/3802a550", "title": "Accelerating Lattice Boltzmann Fluid Flow Simulations Using Graphics Processors", "doi": null, "abstractUrl": "/proceedings-article/icpp/2009/3802a550/12OmNB1wkOA", "parentPublication": { "id": "proceedings/icpp/2009/3802/0", "title": "2009 International Conference on Parallel Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2013/4932/0/4932a677", 
"title": "Lattice Boltzmann Simulation of Drag Reduction for the Flow Around an Elliptic Cylinder in Electromagnetic Field", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2013/4932a677/12OmNBrV1Mk", "parentPublication": { "id": "proceedings/icmtma/2013/4932/0", "title": "2013 Fifth International Conference on Measuring Technology and Mechatronics Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mcsi/2015/8673/0/8673a199", "title": "Validation of a Dynamically Adaptive Lattice Boltzmann Method for 2D Thermal Convection Simulations", "doi": null, "abstractUrl": "/proceedings-article/mcsi/2015/8673a199/12OmNqG0T57", "parentPublication": { "id": "proceedings/mcsi/2015/8673/0", "title": "2015 Second International Conference on Mathematics and Computers in Sciences and in Industry (MCSI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cis/2011/4584/0/4584b159", "title": "Level Set Region Based Image Segmentation Using Lattice Boltzmann Method", "doi": null, "abstractUrl": "/proceedings-article/cis/2011/4584b159/12OmNx9FhR9", "parentPublication": { "id": "proceedings/cis/2011/4584/0", "title": "2011 Seventh International Conference on Computational Intelligence and Security", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hipc/2015/8488/0/8488a315", "title": "Memory-Efficient Parallelization of 3D Lattice Boltzmann Flow Solver on a GPU", "doi": null, "abstractUrl": "/proceedings-article/hipc/2015/8488a315/12OmNyRPgxl", "parentPublication": { "id": "proceedings/hipc/2015/8488/0", "title": "2015 IEEE 22nd International Conference on High Performance Computing (HiPC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icie/2009/3679/1/3679a107", "title": "Parallel Lattice Boltzmann Simulation for Fluid Flow on Multicore Platform", "doi": null, 
"abstractUrl": "/proceedings-article/icie/2009/3679a107/12OmNyaoDxI", "parentPublication": { "id": "proceedings/icie/2009/3679/1", "title": "2009 WASE International Conference on Information Engineering (ICIE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcmp-ugc/2007/3088/0/30880052", "title": "Lattice Boltzmann Algorithms for Fluid Turbulence", "doi": null, "abstractUrl": "/proceedings-article/hpcmp-ugc/2007/30880052/12OmNzBwGKy", "parentPublication": { "id": "proceedings/hpcmp-ugc/2007/3088/0", "title": "HPCMP Users Group Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2004/02/v0164", "title": "The Lattice-Boltzmann Method for Simulating Gaseous Phenomena", "doi": null, "abstractUrl": "/journal/tg/2004/02/v0164/13rRUwh80H2", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sbac-pad/2018/7769/0/08645909", "title": "Designing a Parallel Memory-Aware Lattice Boltzmann Algorithm on Manycore Systems", "doi": null, "abstractUrl": "/proceedings-article/sbac-pad/2018/08645909/17QjJfcUM9P", "parentPublication": { "id": "proceedings/sbac-pad/2018/7769/0", "title": "2018 30th International Symposium on Computer Architecture and High Performance Computing (SBAC-PAD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2022/03/09492751", "title": "Propagation Pattern for Moment Representation of the Lattice Boltzmann Method", "doi": null, "abstractUrl": "/journal/td/2022/03/09492751/1vq0IzdZTna", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }