| data dict |
| --- |
{
"proceeding": {
"id": "12OmNyjLoRf",
"title": "Pattern Recognition, International Conference on",
"acronym": "icpr",
"groupId": "1000545",
"volume": "2",
"displayVolume": "2",
"year": "2002",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBW0vFt",
"doi": "10.1109/ICPR.2002.1048355",
"title": "Mapping emotional status to facial expressions",
"normalizedTitle": "Mapping emotional status to facial expressions",
"abstract": "The facial expression plays a crucial role in interpersonal communication. We can perceive people's inner world by observing his/her facial expressions. We present a simple methodology for synthesizing realistic facial expressions by manipulating emotional status. For the convenience of parametric representation, we adopt a statistical model to describe facial appearance variations due emotional factors. We investigate the correlation of parameters between emotional status and face model, and design an emotional function that maps one to another. Since the emotional function considers the way in which pose expressions only, it is independent of the particular subject within the training set and can be used to simulate emotional expressions for a new person. The capability of emotional function enables us to conduct a series of interesting experiments, such as predicting unseen expressions for a unfamiliar person, simulating one's facial expression in other's style, extracting pure expressions from a mixture. All these experimental methods and visual results are presented.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The facial expression plays a crucial role in interpersonal communication. We can perceive people's inner world by observing his/her facial expressions. We present a simple methodology for synthesizing realistic facial expressions by manipulating emotional status. For the convenience of parametric representation, we adopt a statistical model to describe facial appearance variations due emotional factors. We investigate the correlation of parameters between emotional status and face model, and design an emotional function that maps one to another. Since the emotional function considers the way in which pose expressions only, it is independent of the particular subject within the training set and can be used to simulate emotional expressions for a new person. The capability of emotional function enables us to conduct a series of interesting experiments, such as predicting unseen expressions for a unfamiliar person, simulating one's facial expression in other's style, extracting pure expressions from a mixture. All these experimental methods and visual results are presented.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The facial expression plays a crucial role in interpersonal communication. We can perceive people's inner world by observing his/her facial expressions. We present a simple methodology for synthesizing realistic facial expressions by manipulating emotional status. For the convenience of parametric representation, we adopt a statistical model to describe facial appearance variations due emotional factors. We investigate the correlation of parameters between emotional status and face model, and design an emotional function that maps one to another. Since the emotional function considers the way in which pose expressions only, it is independent of the particular subject within the training set and can be used to simulate emotional expressions for a new person. The capability of emotional function enables us to conduct a series of interesting experiments, such as predicting unseen expressions for a unfamiliar person, simulating one's facial expression in other's style, extracting pure expressions from a mixture. All these experimental methods and visual results are presented.",
"fno": "01048355",
"keywords": [
"Realistic Images",
"User Interfaces",
"Computer Animation",
"Visual Databases",
"Emotional Status Mapping",
"Realistic Facial Expressions Synthesis",
"Parametric Representation",
"Statistical Model",
"Experiments",
"Facial Image Database",
"Interpersonal Communication",
"Animation",
"Facial Animation",
"Databases",
"Computational Modeling",
"Computer Simulation",
"Humans",
"Eyes",
"Rendering Computer Graphics",
"Shape",
"Control Systems",
"Financial Advantage Program"
],
"authors": [
{
"affiliation": "Dept. of Comput. Sci. & Technol., Tsinghua Univ., Beijing, China",
"fullName": "Yangzhou Du",
"givenName": null,
"surname": "Yangzhou Du",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Comput. Sci. & Technol., Tsinghua Univ., Beijing, China",
"fullName": "Xueyin Lin",
"givenName": null,
"surname": "Xueyin Lin",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2002-01-01T00:00:00",
"pubType": "proceedings",
"pages": "524,525,526,527",
"year": "2002",
"issn": "1051-4651",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "169520465",
"articleId": "12OmNvvLi74",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "169520469",
"articleId": "12OmNBC8AvT",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/fg/2008/2153/0/04813317",
"title": "Emotional contagion for unseen bodily expressions: Evidence from facial EMG",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2008/04813317/12OmNAmmuOV",
"parentPublication": {
"id": "proceedings/fg/2008/2153/0",
"title": "2008 8th IEEE International Conference on Automatic Face & Gesture Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/devlrn/2005/9226/0/01490973",
"title": "Emotional elicitation by dynamic facial expressions",
"doi": null,
"abstractUrl": "/proceedings-article/devlrn/2005/01490973/12OmNBt3qpZ",
"parentPublication": {
"id": "proceedings/devlrn/2005/9226/0",
"title": "International Conference on Development and Learning",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgames/2013/0820/0/06632622",
"title": "Transfer facial expressions with identical topology",
"doi": null,
"abstractUrl": "/proceedings-article/cgames/2013/06632622/12OmNCwUmzy",
"parentPublication": {
"id": "proceedings/cgames/2013/0820/0",
"title": "2013 18th International Conference on Computer Games: AI, Animation, Mobile, Interactive Multimedia, Educational & Serious Games (CGAMES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2002/1695/2/169520524",
"title": "Mapping Emotional Status to Facial Expressions",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2002/169520524/12OmNqzu6Uo",
"parentPublication": {
"id": "proceedings/icpr/2002/1695/2",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2009/4800/0/05349569",
"title": "Evaluation of multimodal sequential expressions of emotions in ECA",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2009/05349569/12OmNrIrPrp",
"parentPublication": {
"id": "proceedings/acii/2009/4800/0",
"title": "2009 3rd International Conference on Affective Computing and Intelligent Interaction and Workshops (ACII 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv-vis/2008/3271/0/3271a135",
"title": "Visualisation Tool for Representing Synthetic Facial Emotional Expressions",
"doi": null,
"abstractUrl": "/proceedings-article/iv-vis/2008/3271a135/12OmNvRU0qD",
"parentPublication": {
"id": "proceedings/iv-vis/2008/3271/0",
"title": "Visualisation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2009/4800/0/05349549",
"title": "Perception of emotional expressions in different representations using facial feature points",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2009/05349549/12OmNzUgdes",
"parentPublication": {
"id": "proceedings/acii/2009/4800/0",
"title": "2009 3rd International Conference on Affective Computing and Intelligent Interaction and Workshops (ACII 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/viz/2009/3734/0/3734a061",
"title": "Considerations for Believable Emotional Facial Expression Animation",
"doi": null,
"abstractUrl": "/proceedings-article/viz/2009/3734a061/12OmNzZmZrJ",
"parentPublication": {
"id": "proceedings/viz/2009/3734/0",
"title": "Visualisation, International Conference in",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2018/02/07547933",
"title": "Virtual Character Facial Expressions Influence Human Brain and Facial EMG Activity in a Decision-Making Game",
"doi": null,
"abstractUrl": "/journal/ta/2018/02/07547933/13rRUxlgy26",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090662",
"title": "Perception of Head Motion Effect on Emotional Facial Expression in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090662/1jIxmuXW5Es",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
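Each row in this dump shares the same schema ("proceeding", "article", "adjacentArticles", "recommendedArticles", ...). As a quick sanity check, the sketch below pulls the citation-level fields out of one record; it uses only the standard library, and the inline `record_json` is a truncated stand-in for a full row, not a complete record.

```python
import json

# Truncated stand-in for one row of this dump; field names match the
# records above.
record_json = """
{
  "proceeding": {"title": "Pattern Recognition, International Conference on",
                 "year": "2002"},
  "article": {"doi": "10.1109/ICPR.2002.1048355",
              "title": "Mapping emotional status to facial expressions",
              "authors": [{"fullName": "Yangzhou Du"},
                          {"fullName": "Xueyin Lin"}],
              "keywords": ["Facial Animation"],
              "pages": "524,525,526,527"}
}
"""

record = json.loads(record_json)
article = record["article"]
authors = ", ".join(a["fullName"] for a in article["authors"])
print(f'{authors}. "{article["title"]}". '
      f'{record["proceeding"]["title"]}, {record["proceeding"]["year"]}. '
      f'doi:{article["doi"]}')
```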
{
"proceeding": {
"id": "12OmNzuIjee",
"title": "Digital Media and Digital Content Management, Workshop on",
"acronym": "dmdcm",
"groupId": "1800440",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNrHjqI9",
"doi": "10.1109/DMDCM.2011.76",
"title": "Towards 3D Communications: Real Time Emotion Driven 3D Virtual Facial Animation",
"normalizedTitle": "Towards 3D Communications: Real Time Emotion Driven 3D Virtual Facial Animation",
"abstract": "In this paper, we provide a novel algorithm for building real time 3D virtual facial animation of avatar by drawing facial expression features of real characters. This algorithm first (automatically) labels the feature points of user's facial imagines drew from camera. Then sequentially estimates the emotional state of user's face expressions by Hidden Markov Model. At last construct the avatar's facial animation according to these emotional states. We explore the complex expression space by outputting synthetic facial animations blended by 6 standard facial expressions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we provide a novel algorithm for building real time 3D virtual facial animation of avatar by drawing facial expression features of real characters. This algorithm first (automatically) labels the feature points of user's facial imagines drew from camera. Then sequentially estimates the emotional state of user's face expressions by Hidden Markov Model. At last construct the avatar's facial animation according to these emotional states. We explore the complex expression space by outputting synthetic facial animations blended by 6 standard facial expressions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we provide a novel algorithm for building real time 3D virtual facial animation of avatar by drawing facial expression features of real characters. This algorithm first (automatically) labels the feature points of user's facial imagines drew from camera. Then sequentially estimates the emotional state of user's face expressions by Hidden Markov Model. At last construct the avatar's facial animation according to these emotional states. We explore the complex expression space by outputting synthetic facial animations blended by 6 standard facial expressions.",
"fno": "4413a132",
"keywords": [
"Face Animation",
"Hidden Markov Model",
"Facial Expression Extracting",
"Emotional State Estimation"
],
"authors": [
{
"affiliation": null,
"fullName": "Wei Zhou",
"givenName": "Wei",
"surname": "Zhou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Nan Xiang",
"givenName": "Nan",
"surname": "Xiang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xiaojian Zhou",
"givenName": "Xiaojian",
"surname": "Zhou",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "dmdcm",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-05-01T00:00:00",
"pubType": "proceedings",
"pages": "132-135",
"year": "2011",
"issn": null,
"isbn": "978-0-7695-4413-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4413a125",
"articleId": "12OmNzIUfVx",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4413a136",
"articleId": "12OmNy6HQP8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/pg/2002/1784/0/17840077",
"title": "\"May I talk to you? :-)\" — Facial Animation from Text",
"doi": null,
"abstractUrl": "/proceedings-article/pg/2002/17840077/12OmNAkWveH",
"parentPublication": {
"id": "proceedings/pg/2002/1784/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2000/0580/0/00840628",
"title": "Facial tracking and animation using a 3D sensor",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2000/00840628/12OmNBlFQZ9",
"parentPublication": {
"id": "proceedings/fg/2000/0580/0",
"title": "Proceedings Fourth IEEE International Conference on Automatic Face and Gesture Recognition (Cat. No. PR00580)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/devlrn/2005/9226/0/01490973",
"title": "Emotional elicitation by dynamic facial expressions",
"doi": null,
"abstractUrl": "/proceedings-article/devlrn/2005/01490973/12OmNBt3qpZ",
"parentPublication": {
"id": "proceedings/devlrn/2005/9226/0",
"title": "International Conference on Development and Learning",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/kse/2009/3846/0/3846a081",
"title": "Fast and Realistic 2D Facial Animation Based on Image Warping",
"doi": null,
"abstractUrl": "/proceedings-article/kse/2009/3846a081/12OmNqGA59e",
"parentPublication": {
"id": "proceedings/kse/2009/3846/0",
"title": "Knowledge and Systems Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/computationworld/2009/3862/0/3862a675",
"title": "Investigating Facial Animation Production through Artistic Inquiry",
"doi": null,
"abstractUrl": "/proceedings-article/computationworld/2009/3862a675/12OmNvDqsLT",
"parentPublication": {
"id": "proceedings/computationworld/2009/3862/0",
"title": "Future Computing, Service Computation, Cognitive, Adaptive, Content, Patterns, Computation World",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/1996/7588/0/75880098",
"title": "Facial Animation",
"doi": null,
"abstractUrl": "/proceedings-article/ca/1996/75880098/12OmNvT2oR2",
"parentPublication": {
"id": "proceedings/ca/1996/7588/0",
"title": "Computer Animation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2006/2606/0/26060428",
"title": "Facial Animation Using Emotional Model",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2006/26060428/12OmNwCaCrJ",
"parentPublication": {
"id": "proceedings/cgiv/2006/2606/0",
"title": "International Conference on Computer Graphics, Imaging and Visualisation (CGIV'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2010/4166/0/4166a009",
"title": "Expressive MPEG-4 Facial Animation Using Quadratic Deformation Models",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2010/4166a009/12OmNxH9Xgx",
"parentPublication": {
"id": "proceedings/cgiv/2010/4166/0",
"title": "2010 Seventh International Conference on Computer Graphics, Imaging and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/viz/2009/3734/0/3734a061",
"title": "Considerations for Believable Emotional Facial Expression Animation",
"doi": null,
"abstractUrl": "/proceedings-article/viz/2009/3734a061/12OmNzZmZrJ",
"parentPublication": {
"id": "proceedings/viz/2009/3734/0",
"title": "Visualisation, International Conference in",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2010/04/mcg2010040051",
"title": "Modeling Short-Term Dynamics and Variability for Realistic Interactive Facial Animation",
"doi": null,
"abstractUrl": "/magazine/cg/2010/04/mcg2010040051/13rRUwgQpwW",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
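The abstract above describes a three-stage pipeline: label feature points, estimate the emotional state sequentially with a Hidden Markov Model, then drive the avatar's animation. A minimal sketch of the middle stage, assuming a discrete HMM whose states, transition matrix, and emission matrix are illustrative stand-ins (the paper does not publish its parameters):

```python
import numpy as np

# Illustrative discrete HMM; states and probabilities are assumptions,
# not values from the DMDCM'11 paper.
states = ["neutral", "happy", "sad"]
A = np.array([[0.8, 0.1, 0.1],   # P(state_t | state_{t-1}), rows = previous state
              [0.1, 0.8, 0.1],
              [0.1, 0.1, 0.8]])
B = np.array([[0.7, 0.2, 0.1],   # P(observation | state), columns = obs codes
              [0.2, 0.7, 0.1],
              [0.1, 0.1, 0.8]])
pi = np.array([1.0, 0.0, 0.0])   # start in "neutral"

def filter_emotions(observations):
    """Forward-algorithm filtering: belief over states after each frame."""
    belief = pi * B[:, observations[0]]
    belief /= belief.sum()
    beliefs = [belief]
    for obs in observations[1:]:
        belief = (A.T @ belief) * B[:, obs]   # predict, then correct
        belief /= belief.sum()
        beliefs.append(belief)
    return beliefs

# Toy run: each observation code stands in for a quantized feature-point cue.
for t, b in enumerate(filter_emotions([0, 1, 1, 2])):
    print(t, states[int(np.argmax(b))], np.round(b, 2))
```

The per-frame belief (or its argmax) would then pick the blend weights over the 6 standard expressions mentioned in the abstract.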
{
"proceeding": {
"id": "12OmNyfdOIP",
"title": "Visualisation, International Conference on",
"acronym": "iv-vis",
"groupId": "1001944",
"volume": "0",
"displayVolume": "0",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvRU0qD",
"doi": "10.1109/VIS.2008.14",
"title": "Visualisation Tool for Representing Synthetic Facial Emotional Expressions",
"normalizedTitle": "Visualisation Tool for Representing Synthetic Facial Emotional Expressions",
"abstract": "This paper describes a face-muscle model system capable ofemoting avatars in a variety of applications. The presented model produces dynamically changing facial expressions on computer generated faces, using mathematically modeled muscle deformations. The muscle model used to distort sets of vertices in a 3D space, is independent of the geometric model and hence it can be applied to arbitrary face meshes. The work presented here is based on the theories of Keith Waters and Fred Parke as detailed in their book Computer Facial Animation. The original Geoface program was written by Keith Waters in 1994 at Cambridge Research Laboratories, and is available from the OpenGL organisation as part of a demo package.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper describes a face-muscle model system capable ofemoting avatars in a variety of applications. The presented model produces dynamically changing facial expressions on computer generated faces, using mathematically modeled muscle deformations. The muscle model used to distort sets of vertices in a 3D space, is independent of the geometric model and hence it can be applied to arbitrary face meshes. The work presented here is based on the theories of Keith Waters and Fred Parke as detailed in their book Computer Facial Animation. The original Geoface program was written by Keith Waters in 1994 at Cambridge Research Laboratories, and is available from the OpenGL organisation as part of a demo package.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper describes a face-muscle model system capable ofemoting avatars in a variety of applications. The presented model produces dynamically changing facial expressions on computer generated faces, using mathematically modeled muscle deformations. The muscle model used to distort sets of vertices in a 3D space, is independent of the geometric model and hence it can be applied to arbitrary face meshes. The work presented here is based on the theories of Keith Waters and Fred Parke as detailed in their book Computer Facial Animation. The original Geoface program was written by Keith Waters in 1994 at Cambridge Research Laboratories, and is available from the OpenGL organisation as part of a demo package.",
"fno": "3271a135",
"keywords": [
"Visualization Tools",
"Facial Animation",
"Muscle Models",
"Synthetic Facial Expressions"
],
"authors": [
{
"affiliation": null,
"fullName": "Andreas Loizides",
"givenName": "Andreas",
"surname": "Loizides",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Stephania Loizidou Himona",
"givenName": "Stephania Loizidou",
"surname": "Himona",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yiorgos Chrysanthou",
"givenName": "Yiorgos",
"surname": "Chrysanthou",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iv-vis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-07-01T00:00:00",
"pubType": "proceedings",
"pages": "135-140",
"year": "2008",
"issn": null,
"isbn": "978-0-7695-3271-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3271a129",
"articleId": "12OmNwoPttf",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3271a141",
"articleId": "12OmNwwMf2p",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccsee/2012/4647/3/4647c434",
"title": "A Survey of Computer Facial Animation Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/iccsee/2012/4647c434/12OmNAXxXhU",
"parentPublication": {
"id": "proceedings/iccsee/2012/4647/3",
"title": "Computer Science and Electronics Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/1995/7042/0/70420360",
"title": "Facial expression recognition using a dynamic model and motion energy",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1995/70420360/12OmNrYlmBV",
"parentPublication": {
"id": "proceedings/iccv/1995/7042/0",
"title": "Computer Vision, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/1996/7588/0/75880068",
"title": "Modeling, Tracking and Interactive Animation of Faces and Heads Using Input from Video",
"doi": null,
"abstractUrl": "/proceedings-article/ca/1996/75880068/12OmNwfKjaJ",
"parentPublication": {
"id": "proceedings/ca/1996/7588/0",
"title": "Computer Animation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/2002/1594/0/15940017",
"title": "CoArt: Co-articulation Region Analysis for Control of 2D Characters",
"doi": null,
"abstractUrl": "/proceedings-article/ca/2002/15940017/12OmNx5Yv6M",
"parentPublication": {
"id": "proceedings/ca/2002/1594/0",
"title": "Computer Animation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csie/2009/3507/7/3507g048",
"title": "Anatomy-Based Modeling, Muscle-Based Animating and Math-Based Representing of Human Eyes",
"doi": null,
"abstractUrl": "/proceedings-article/csie/2009/3507g048/12OmNxvO0ak",
"parentPublication": {
"id": "proceedings/csie/2009/3507/7",
"title": "Computer Science and Information Engineering, World Congress on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dmdcm/2011/4413/0/4413a107",
"title": "Creating Emotional Speech for Conversational Agents",
"doi": null,
"abstractUrl": "/proceedings-article/dmdcm/2011/4413a107/12OmNya72wB",
"parentPublication": {
"id": "proceedings/dmdcm/2011/4413/0",
"title": "Digital Media and Digital Content Management, Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csse/2008/3336/2/3336d178",
"title": "A Facial Expression Calculate Method Based on Muscle Model",
"doi": null,
"abstractUrl": "/proceedings-article/csse/2008/3336d178/12OmNykkB7O",
"parentPublication": {
"id": "proceedings/csse/2008/3336/6",
"title": "Computer Science and Software Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/1999/0167/0/01670210",
"title": "Skin Aging Estimation by Facial Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/ca/1999/01670210/12OmNzRZpUr",
"parentPublication": {
"id": "proceedings/ca/1999/0167/0",
"title": "Computer Animation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/casa/2003/1934/0/19340033",
"title": "Improvements on a Simple Muscle-Based 3D Face for Realistic Facial Expressions",
"doi": null,
"abstractUrl": "/proceedings-article/casa/2003/19340033/12OmNzxPTJQ",
"parentPublication": {
"id": "proceedings/casa/2003/1934/0",
"title": "Computer Animation and Social Agents, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1993/06/i0569",
"title": "Analysis and Synthesis of Facial Image Sequences Using Physical and Anatomical Models",
"doi": null,
"abstractUrl": "/journal/tp/1993/06/i0569/13rRUwInvg4",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
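A Waters-style linear muscle, which the abstract refers to via Geoface, displaces vertices purely from their positions relative to a muscle vector, which is why the deformation is independent of mesh topology. A minimal sketch under an assumed cosine falloff (a simplification for illustration, not the actual Geoface code):

```python
import numpy as np

def apply_linear_muscle(verts, head, tail, contraction, radius):
    """Pull vertices near the muscle's tail toward its head attachment.

    Assumed cosine falloff inside a spherical zone of influence; the real
    Waters model also attenuates by angle from the muscle axis.
    """
    out = verts.copy()
    direction = head - tail
    direction /= np.linalg.norm(direction)
    for i, v in enumerate(verts):
        d = np.linalg.norm(v - tail)
        if d < radius:                                # inside the zone
            falloff = np.cos(d / radius * np.pi / 2)  # 1 at tail, 0 at edge
            out[i] = v + contraction * falloff * direction
    return out

# Toy patch: three vertices, one zygomatic-major-like muscle.
verts = np.array([[0.0, 0.0, 0.0], [0.5, 0.2, 0.0], [2.0, 2.0, 0.0]])
print(apply_linear_muscle(verts,
                          head=np.array([1.0, 1.0, 0.0]),
                          tail=np.array([0.0, 0.0, 0.0]),
                          contraction=0.3, radius=1.0))
```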
{
"proceeding": {
"id": "12OmNyO8tMO",
"title": "2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)",
"acronym": "fg",
"groupId": "1000065",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvoWUYc",
"doi": "10.1109/FG.2018.00058",
"title": "Say CHEESE: Common Human Emotional Expression Set Encoder and Its Application to Analyze Deceptive Communication",
"normalizedTitle": "Say CHEESE: Common Human Emotional Expression Set Encoder and Its Application to Analyze Deceptive Communication",
"abstract": "In this paper we introduce the Common Human Emotional Expression Set Encoder (CHEESE) framework for objectively determining which, if any, subsets of the facial action units associated with smiling are well represented by a small finite set of clusters according to an information theoretic metric. Smile-related AUs (6,7,10,12,14) in over 1.3M frames of facial expressions from 151 pairs of individuals playing a communication game involving deception were analyzed with CHEESE. The combination of AU6 (cheek raiser) and AU12 (lip corner puller) are shown to cluster well into five different types of expression. Liars showed high intensity AU6 and AU12 more often compared to honest speakers. Additionally, interrogators were found to express a higher frequency of low intensity AU6 with high intensity AU12 (i.e. polite smiles) when they were being lied to, suggesting that deception analysis should be done in consideration of both the message sender's and the receiver's facial expressions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper we introduce the Common Human Emotional Expression Set Encoder (CHEESE) framework for objectively determining which, if any, subsets of the facial action units associated with smiling are well represented by a small finite set of clusters according to an information theoretic metric. Smile-related AUs (6,7,10,12,14) in over 1.3M frames of facial expressions from 151 pairs of individuals playing a communication game involving deception were analyzed with CHEESE. The combination of AU6 (cheek raiser) and AU12 (lip corner puller) are shown to cluster well into five different types of expression. Liars showed high intensity AU6 and AU12 more often compared to honest speakers. Additionally, interrogators were found to express a higher frequency of low intensity AU6 with high intensity AU12 (i.e. polite smiles) when they were being lied to, suggesting that deception analysis should be done in consideration of both the message sender's and the receiver's facial expressions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper we introduce the Common Human Emotional Expression Set Encoder (CHEESE) framework for objectively determining which, if any, subsets of the facial action units associated with smiling are well represented by a small finite set of clusters according to an information theoretic metric. Smile-related AUs (6,7,10,12,14) in over 1.3M frames of facial expressions from 151 pairs of individuals playing a communication game involving deception were analyzed with CHEESE. The combination of AU6 (cheek raiser) and AU12 (lip corner puller) are shown to cluster well into five different types of expression. Liars showed high intensity AU6 and AU12 more often compared to honest speakers. Additionally, interrogators were found to express a higher frequency of low intensity AU6 with high intensity AU12 (i.e. polite smiles) when they were being lied to, suggesting that deception analysis should be done in consideration of both the message sender's and the receiver's facial expressions.",
"fno": "233501a357",
"keywords": [
"Emotion Recognition",
"Face Recognition",
"Facial Expressions",
"CHEESE",
"Common Human Emotional Expression Set Encoder Framework",
"Facial Action Units",
"Information Theoretic Metric",
"Smile Related A Us",
"Communication Game",
"Deception Analysis",
"Deceptive Communication",
"Face",
"Lips",
"Games",
"Video Recording",
"Dairy Products",
"Mouth",
"Psychology",
"Deception",
"Clustering",
"Facial Expression"
],
"authors": [
{
"affiliation": "Department of Computer Science University of Rochester Rochester, NY, United States",
"fullName": "Matthew Levin",
"givenName": "Matthew",
"surname": "Levin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Taylan Sen",
"givenName": "Taylan",
"surname": "Sen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Md Kamrul Hasan",
"givenName": "Md Kamrul",
"surname": "Hasan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Minh Tran",
"givenName": "Minh",
"surname": "Tran",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yiming Yang",
"givenName": "Yiming",
"surname": "Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Mohammed Ehsan Hoque",
"givenName": "Mohammed Ehsan",
"surname": "Hoque",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "fg",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-05-01T00:00:00",
"pubType": "proceedings",
"pages": "357-364",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-2335-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "233501a349",
"articleId": "12OmNBU1jK3",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "233501a365",
"articleId": "12OmNC3FGgx",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/acii/2017/0563/0/08273666",
"title": "Temporal patterns of facial expression in deceptive and honest communication",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2017/08273666/12OmNBQC8dP",
"parentPublication": {
"id": "proceedings/acii/2017/0563/0",
"title": "2017 Seventh International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209c519",
"title": "High-Stakes Deception Detection Based on Facial Expressions",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209c519/12OmNznkK02",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049691",
"title": "Emotional Voice Puppetry",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049691/1KYouSCDkQM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2020/9360/0/09150616",
"title": "Challenges in Recognizing Spontaneous and Intentionally Expressed Reactions to Positive and Negative Images",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2020/09150616/1lPH4yHp5fy",
"parentPublication": {
"id": "proceedings/cvprw/2020/9360/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
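The clustering step described in the CHEESE abstract groups (AU6, AU12) intensity pairs into five expression clusters. A minimal sketch using plain k-means on synthetic intensities; the paper's information-theoretic selection of the AU subset and cluster count is not reproduced here.

```python
import numpy as np

rng = np.random.default_rng(0)
# Synthetic stand-in for per-frame (AU6, AU12) intensities on a 0-5 scale.
au_frames = rng.uniform(0, 5, size=(1000, 2))

def kmeans(X, k=5, iters=50):
    """Plain k-means; stands in for the paper's clustering step."""
    centers = X[rng.choice(len(X), k, replace=False)]
    for _ in range(iters):
        labels = np.argmin(((X[:, None] - centers) ** 2).sum(-1), axis=1)
        centers = np.array([X[labels == j].mean(0) if np.any(labels == j)
                            else centers[j] for j in range(k)])
    return labels, centers

labels, centers = kmeans(au_frames)
print("cluster centers (AU6, AU12):")
print(np.round(centers, 2))
```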
{
"proceeding": {
"id": "12OmNzZmZqY",
"title": "International Conference on Computer Graphics, Imaging and Visualisation (CGIV'06)",
"acronym": "cgiv",
"groupId": "1001775",
"volume": "0",
"displayVolume": "0",
"year": "2006",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwCaCrJ",
"doi": "10.1109/CGIV.2006.41",
"title": "Facial Animation Using Emotional Model",
"normalizedTitle": "Facial Animation Using Emotional Model",
"abstract": "Recent 3D graphics hardware technologies have made it possible to create visually realistic 3D characters and 3D scenes in real time. And then, the video game has become one of the main applications of 3D graphics technologies. However, behaviors of Non Player Characters (NPCs) in video games are not satisfactory because they are still predefined and then very simple. To attract game players, NPCs should have more complicated behaviors like the human. Facial expression is one of the most important factors for such a humanlike NPC. In this paper, the authors propose an NPC which interacts with the human. The NPC changes its facial expression according to its emotion during the interaction. For realizing such an NPC, the authors implemented a neural network based emotional model unit [5]. By some experiments, this paper also shows that the NPC can change its facial expression according to its emotion like the human.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Recent 3D graphics hardware technologies have made it possible to create visually realistic 3D characters and 3D scenes in real time. And then, the video game has become one of the main applications of 3D graphics technologies. However, behaviors of Non Player Characters (NPCs) in video games are not satisfactory because they are still predefined and then very simple. To attract game players, NPCs should have more complicated behaviors like the human. Facial expression is one of the most important factors for such a humanlike NPC. In this paper, the authors propose an NPC which interacts with the human. The NPC changes its facial expression according to its emotion during the interaction. For realizing such an NPC, the authors implemented a neural network based emotional model unit [5]. By some experiments, this paper also shows that the NPC can change its facial expression according to its emotion like the human.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Recent 3D graphics hardware technologies have made it possible to create visually realistic 3D characters and 3D scenes in real time. And then, the video game has become one of the main applications of 3D graphics technologies. However, behaviors of Non Player Characters (NPCs) in video games are not satisfactory because they are still predefined and then very simple. To attract game players, NPCs should have more complicated behaviors like the human. Facial expression is one of the most important factors for such a humanlike NPC. In this paper, the authors propose an NPC which interacts with the human. The NPC changes its facial expression according to its emotion during the interaction. For realizing such an NPC, the authors implemented a neural network based emotional model unit [5]. By some experiments, this paper also shows that the NPC can change its facial expression according to its emotion like the human.",
"fno": "26060428",
"keywords": [
"CG Animation",
"CG Character",
"Facial Animation",
"Neural Network",
"Emotional Model"
],
"authors": [
{
"affiliation": "Kyushu University, Japan",
"fullName": "Chihiro Kozasa",
"givenName": "Chihiro",
"surname": "Kozasa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Kyushu University, Japan",
"fullName": "Hiromichi Fukutake",
"givenName": "Hiromichi",
"surname": "Fukutake",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Kyushu University, Japan",
"fullName": "Hirokazu Notsu",
"givenName": "Hirokazu",
"surname": "Notsu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Kyushu University, Japan",
"fullName": "Yoshihiro Okada",
"givenName": "Yoshihiro",
"surname": "Okada",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Kyushu University, Japan",
"fullName": "Koichi Niijima",
"givenName": "Koichi",
"surname": "Niijima",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cgiv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2006-07-01T00:00:00",
"pubType": "proceedings",
"pages": "428-433",
"year": "2006",
"issn": null,
"isbn": "0-7695-2606-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "26060423",
"articleId": "12OmNBEYzN9",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "26060434",
"articleId": "12OmNA0vnWL",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/fg/2000/0580/0/00840628",
"title": "Facial tracking and animation using a 3D sensor",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2000/00840628/12OmNBlFQZ9",
"parentPublication": {
"id": "proceedings/fg/2000/0580/0",
"title": "Proceedings Fourth IEEE International Conference on Automatic Face and Gesture Recognition (Cat. No. PR00580)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dmdcm/2011/4413/0/4413a132",
"title": "Towards 3D Communications: Real Time Emotion Driven 3D Virtual Facial Animation",
"doi": null,
"abstractUrl": "/proceedings-article/dmdcm/2011/4413a132/12OmNrHjqI9",
"parentPublication": {
"id": "proceedings/dmdcm/2011/4413/0",
"title": "Digital Media and Digital Content Management, Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/computationworld/2009/3862/0/3862a675",
"title": "Investigating Facial Animation Production through Artistic Inquiry",
"doi": null,
"abstractUrl": "/proceedings-article/computationworld/2009/3862a675/12OmNvDqsLT",
"parentPublication": {
"id": "proceedings/computationworld/2009/3862/0",
"title": "Future Computing, Service Computation, Cognitive, Adaptive, Content, Patterns, Computation World",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/1996/7588/0/75880098",
"title": "Facial Animation",
"doi": null,
"abstractUrl": "/proceedings-article/ca/1996/75880098/12OmNvT2oR2",
"parentPublication": {
"id": "proceedings/ca/1996/7588/0",
"title": "Computer Animation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2017/0733/0/0733c328",
"title": "Speech-Driven 3D Facial Animation with Implicit Emotional Awareness: A Deep Learning Approach",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2017/0733c328/12OmNxE2mG1",
"parentPublication": {
"id": "proceedings/cvprw/2017/0733/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2010/4166/0/4166a009",
"title": "Expressive MPEG-4 Facial Animation Using Quadratic Deformation Models",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2010/4166a009/12OmNxH9Xgx",
"parentPublication": {
"id": "proceedings/cgiv/2010/4166/0",
"title": "2010 Seventh International Conference on Computer Graphics, Imaging and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/2001/7237/0/00982374",
"title": "A physically-based model with adaptive refinement for facial animation",
"doi": null,
"abstractUrl": "/proceedings-article/ca/2001/00982374/12OmNxRWI7R",
"parentPublication": {
"id": "proceedings/ca/2001/7237/0",
"title": "Proceedings Computer Animation 2001. Fourteenth Conference on Computer Animation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2014/4761/0/06890231",
"title": "Real-time control of 3D facial animation",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2014/06890231/12OmNyOHG1A",
"parentPublication": {
"id": "proceedings/icme/2014/4761/0",
"title": "2014 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/viz/2009/3734/0/3734a061",
"title": "Considerations for Believable Emotional Facial Expression Animation",
"doi": null,
"abstractUrl": "/proceedings-article/viz/2009/3734a061/12OmNzZmZrJ",
"parentPublication": {
"id": "proceedings/viz/2009/3734/0",
"title": "Visualisation, International Conference in",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/06/v1523",
"title": "Expressive Facial Animation Synthesis by Learning Speech Coarticulation and Expression Spaces",
"doi": null,
"abstractUrl": "/journal/tg/2006/06/v1523/13rRUxASubv",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
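The CGIV'06 abstract sketches an NPC whose facial expression is driven by a neural-network emotional model. The following is only an illustration of that pipeline's shape, with random stand-in weights and an assumed emotion set; the trained emotional-model unit cited as [5] is not reproduced.

```python
import numpy as np

rng = np.random.default_rng(1)
emotions = ["joy", "anger", "sadness", "surprise"]  # assumed emotion set

W1 = rng.normal(size=(8, 3))  # stimulus (3 features) -> hidden layer (8)
W2 = rng.normal(size=(4, 8))  # hidden layer -> emotion intensities (4)

def emotion_from_stimulus(stimulus):
    """One forward pass: interaction stimulus -> emotion distribution."""
    hidden = np.tanh(W1 @ stimulus)
    e = np.exp(W2 @ hidden)
    return e / e.sum()  # softmax over the assumed emotions

# Hypothetical stimulus features, e.g. (proximity, praise, threat) in [0, 1].
probs = emotion_from_stimulus(np.array([0.9, 0.1, 0.0]))
print({name: round(float(p), 2) for name, p in zip(emotions, probs)})
```

The resulting distribution (or its argmax) would then select or blend the NPC's facial expressions.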
{
"proceeding": {
"id": "12OmNwp74rk",
"title": "2009 3rd International Conference on Affective Computing and Intelligent Interaction and Workshops (ACII 2009)",
"acronym": "acii",
"groupId": "1002992",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzUgdes",
"doi": "10.1109/ACII.2009.5349549",
"title": "Perception of emotional expressions in different representations using facial feature points",
"normalizedTitle": "Perception of emotional expressions in different representations using facial feature points",
"abstract": "Facial expression recognition is an enabling technology for affective computing. Many existing facial expression analysis systems rely on automatically tracked facial feature points. Although psychologists have studied emotion perception from manually specified or marker-based point-light displays, no formal study exists on the amount of emotional information conveyed through automatically tracked feature points. We assess the utility of automatically extracted feature points in conveying emotions for posed and naturalistic data and present results from an experiment that compared human raters' judgements of emotional expressions between actual video clips and three automatically generated representations of them. The implications for optimal face representation and creation of realistic animations are discussed.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Facial expression recognition is an enabling technology for affective computing. Many existing facial expression analysis systems rely on automatically tracked facial feature points. Although psychologists have studied emotion perception from manually specified or marker-based point-light displays, no formal study exists on the amount of emotional information conveyed through automatically tracked feature points. We assess the utility of automatically extracted feature points in conveying emotions for posed and naturalistic data and present results from an experiment that compared human raters' judgements of emotional expressions between actual video clips and three automatically generated representations of them. The implications for optimal face representation and creation of realistic animations are discussed.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Facial expression recognition is an enabling technology for affective computing. Many existing facial expression analysis systems rely on automatically tracked facial feature points. Although psychologists have studied emotion perception from manually specified or marker-based point-light displays, no formal study exists on the amount of emotional information conveyed through automatically tracked feature points. We assess the utility of automatically extracted feature points in conveying emotions for posed and naturalistic data and present results from an experiment that compared human raters' judgements of emotional expressions between actual video clips and three automatically generated representations of them. The implications for optimal face representation and creation of realistic animations are discussed.",
"fno": "05349549",
"keywords": [
"Computer Animation",
"Emotion Recognition",
"Face Recognition",
"Feature Extraction",
"Emotional Expression Perception",
"Facial Feature Point",
"Facial Expression Recognition",
"Optimal Face Representation",
"Animation",
"Facial Features",
"Laboratories",
"Face Recognition",
"Psychology",
"Feature Extraction",
"Information Analysis",
"Emotion Recognition",
"Data Mining",
"Humans",
"Facial Animation"
],
"authors": [
{
"affiliation": "Univ. of Cambridge, Computer Laboratory",
"fullName": "Shazia Afzal",
"givenName": "Shazia",
"surname": "Afzal",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Koc Univ., College of Engg",
"fullName": "Tevfik Metin Sezgin",
"givenName": "Tevfik Metin",
"surname": "Sezgin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Univ. of Cambridge, Computer Laboratory",
"fullName": "Yujian Gao",
"givenName": "Yujian",
"surname": "Gao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Univ. of Cambridge, Computer Laboratory",
"fullName": "Peter Robinson",
"givenName": "Peter",
"surname": "Robinson",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "acii",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-09-01T00:00:00",
"pubType": "proceedings",
"pages": "",
"year": "2009",
"issn": "2156-8103",
"isbn": "978-1-4244-4800-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05349474",
"articleId": "12OmNwtEEDf",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05349524",
"articleId": "12OmNAJ4pdW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/fg/2008/2153/0/04813317",
"title": "Emotional contagion for unseen bodily expressions: Evidence from facial EMG",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2008/04813317/12OmNAmmuOV",
"parentPublication": {
"id": "proceedings/fg/2008/2153/0",
"title": "2008 8th IEEE International Conference on Automatic Face & Gesture Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2002/1695/2/01048355",
"title": "Mapping emotional status to facial expressions",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2002/01048355/12OmNBW0vFt",
"parentPublication": {
"id": "proceedings/icpr/2002/1695/2",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/devlrn/2005/9226/0/01490973",
"title": "Emotional elicitation by dynamic facial expressions",
"doi": null,
"abstractUrl": "/proceedings-article/devlrn/2005/01490973/12OmNBt3qpZ",
"parentPublication": {
"id": "proceedings/devlrn/2005/9226/0",
"title": "International Conference on Development and Learning",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiaiaai/2014/4174/0/06913374",
"title": "An Unsupervised Emotional Scene Retrieval Framework for Lifelog Videos",
"doi": null,
"abstractUrl": "/proceedings-article/iiaiaai/2014/06913374/12OmNBtl1Eg",
"parentPublication": {
"id": "proceedings/iiaiaai/2014/4174/0",
"title": "2014 IIAI 3rd International Conference on Advanced Applied Informatics (IIAIAAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2004/8603/2/01394441",
"title": "Recognition of six basic facial expressions by feature-points tracking using RBF neural network and fuzzy inference system",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2004/01394441/12OmNroijl4",
"parentPublication": {
"id": "proceedings/icme/2004/8603/2",
"title": "2004 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2009/4800/0/05349538",
"title": "Integration of a semantic and affective model for realistic generation of emotional states in virtual characters",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2009/05349538/12OmNxw5Byy",
"parentPublication": {
"id": "proceedings/acii/2009/4800/0",
"title": "2009 3rd International Conference on Affective Computing and Intelligent Interaction and Workshops (ACII 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/01/v0048",
"title": "Geometry-driven photorealistic facial expression synthesis",
"doi": null,
"abstractUrl": "/journal/tg/2006/01/v0048/13rRUyY294u",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090662",
"title": "Perception of Head Motion Effect on Emotional Facial Expression in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090662/1jIxmuXW5Es",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iisa/2020/2346/0/09284396",
"title": "Group affect Recognition: Facial Feature Extraction via Color Inverted Points",
"doi": null,
"abstractUrl": "/proceedings-article/iisa/2020/09284396/1pttMnKVdJK",
"parentPublication": {
"id": "proceedings/iisa/2020/2346/0",
"title": "2020 11th International Conference on Information, Intelligence, Systems and Applications (IISA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412581",
"title": "Learning Emotional-Blinded Face Representations",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412581/1tmhNuGX5K0",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxwWorv",
"title": "Visualisation, International Conference in",
"acronym": "viz",
"groupId": "1001944",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzZmZrJ",
"doi": "10.1109/VIZ.2009.28",
"title": "Considerations for Believable Emotional Facial Expression Animation",
"normalizedTitle": "Considerations for Believable Emotional Facial Expression Animation",
"abstract": "Facial expressions can be used to communicate emotional states through the use of universal signifiers within key regions of the face. Psychology research has identified what these signifiers are and how different combinations and variations can be interpreted. Research into expressions has informed animation practice, but as yet very little is known about the movement within and between emotional expressions. A better understanding of sequence, timing, and duration could better inform the production of believable animation. This paper introduces the idea of expression choreography, and how tests of observer perception might enhance our understanding of moving emotional expressions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Facial expressions can be used to communicate emotional states through the use of universal signifiers within key regions of the face. Psychology research has identified what these signifiers are and how different combinations and variations can be interpreted. Research into expressions has informed animation practice, but as yet very little is known about the movement within and between emotional expressions. A better understanding of sequence, timing, and duration could better inform the production of believable animation. This paper introduces the idea of expression choreography, and how tests of observer perception might enhance our understanding of moving emotional expressions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Facial expressions can be used to communicate emotional states through the use of universal signifiers within key regions of the face. Psychology research has identified what these signifiers are and how different combinations and variations can be interpreted. Research into expressions has informed animation practice, but as yet very little is known about the movement within and between emotional expressions. A better understanding of sequence, timing, and duration could better inform the production of believable animation. This paper introduces the idea of expression choreography, and how tests of observer perception might enhance our understanding of moving emotional expressions.",
"fno": "3734a061",
"keywords": [
"Character Animation",
"Facial Animation",
"Emotional Expression",
"Believability",
"Perception"
],
"authors": [
{
"affiliation": null,
"fullName": "Robin James Stuart Sloan",
"givenName": "Robin James Stuart",
"surname": "Sloan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Malcolm Cook",
"givenName": "Malcolm",
"surname": "Cook",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Brian Robinson",
"givenName": "Brian",
"surname": "Robinson",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "viz",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-07-01T00:00:00",
"pubType": "proceedings",
"pages": "61-66",
"year": "2009",
"issn": null,
"isbn": "978-0-7695-3734-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3734a052",
"articleId": "12OmNyL0TrQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3734a067",
"articleId": "12OmNzZmZxL",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/pg/2002/1784/0/17840077",
"title": "\"May I talk to you? :-)\" — Facial Animation from Text",
"doi": null,
"abstractUrl": "/proceedings-article/pg/2002/17840077/12OmNAkWveH",
"parentPublication": {
"id": "proceedings/pg/2002/1784/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/devlrn/2005/9226/0/01490973",
"title": "Emotional elicitation by dynamic facial expressions",
"doi": null,
"abstractUrl": "/proceedings-article/devlrn/2005/01490973/12OmNBt3qpZ",
"parentPublication": {
"id": "proceedings/devlrn/2005/9226/0",
"title": "International Conference on Development and Learning",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/micai/2008/3441/0/3441a420",
"title": "Use of Intelligent Emotional Agents in the Animation of Autonomous Virtual Creatures",
"doi": null,
"abstractUrl": "/proceedings-article/micai/2008/3441a420/12OmNCfjevj",
"parentPublication": {
"id": "proceedings/micai/2008/3441/0",
"title": "2008 Seventh Mexican International Conference on Artificial Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/casa/2003/1934/0/19340023",
"title": "How Believable Are Real Faces? Towards a Perceptual Basis for Conversational Animation",
"doi": null,
"abstractUrl": "/proceedings-article/casa/2003/19340023/12OmNqJZgzS",
"parentPublication": {
"id": "proceedings/casa/2003/1934/0",
"title": "Computer Animation and Social Agents, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dmdcm/2011/4413/0/4413a132",
"title": "Towards 3D Communications: Real Time Emotion Driven 3D Virtual Facial Animation",
"doi": null,
"abstractUrl": "/proceedings-article/dmdcm/2011/4413a132/12OmNrHjqI9",
"parentPublication": {
"id": "proceedings/dmdcm/2011/4413/0",
"title": "Digital Media and Digital Content Management, Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/computationworld/2009/3862/0/3862a675",
"title": "Investigating Facial Animation Production through Artistic Inquiry",
"doi": null,
"abstractUrl": "/proceedings-article/computationworld/2009/3862a675/12OmNvDqsLT",
"parentPublication": {
"id": "proceedings/computationworld/2009/3862/0",
"title": "Future Computing, Service Computation, Cognitive, Adaptive, Content, Patterns, Computation World",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2006/2606/0/26060428",
"title": "Facial Animation Using Emotional Model",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2006/26060428/12OmNwCaCrJ",
"parentPublication": {
"id": "proceedings/cgiv/2006/2606/0",
"title": "International Conference on Computer Graphics, Imaging and Visualisation (CGIV'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2010/4166/0/4166a009",
"title": "Expressive MPEG-4 Facial Animation Using Quadratic Deformation Models",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2010/4166a009/12OmNxH9Xgx",
"parentPublication": {
"id": "proceedings/cgiv/2010/4166/0",
"title": "2010 Seventh International Conference on Computer Graphics, Imaging and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dmdcm/2011/4413/0/4413a107",
"title": "Creating Emotional Speech for Conversational Agents",
"doi": null,
"abstractUrl": "/proceedings-article/dmdcm/2011/4413a107/12OmNya72wB",
"parentPublication": {
"id": "proceedings/dmdcm/2011/4413/0",
"title": "Digital Media and Digital Content Management, Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/1997/7984/0/79840008",
"title": "Emotional posturing: a method towards achieving emotional figure animation",
"doi": null,
"abstractUrl": "/proceedings-article/ca/1997/79840008/12OmNzd7c2k",
"parentPublication": {
"id": "proceedings/ca/1997/7984/0",
"title": "Computer Animation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1yeHGyRsuys",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yeIzfSGjw4",
"doi": "10.1109/CVPR46437.2021.01386",
"title": "Audio-Driven Emotional Video Portraits",
"normalizedTitle": "Audio-Driven Emotional Video Portraits",
"abstract": "Despite previous success in generating audio-driven talking heads, most of the previous studies focus on the correlation between speech content and the mouth shape. Facial emotion, which is one of the most important features on natural human faces, is always neglected in their methods. In this work, we present Emotional Video Portraits (EVP), a system for synthesizing high-quality video portraits with vivid emotional dynamics driven by audios. Specifically, we propose the Cross-Reconstructed Emotion Disentanglement technique to decompose speech into two decoupled spaces, i.e., a duration-independent emotion space and a duration- dependent content space. With the disentangled features, dynamic 2D emotional facial landmarks can be deduced. Then we propose the Target-Adaptive Face Synthesis technique to generate the final high-quality video portraits, by bridging the gap between the deduced landmarks and the natural head poses of target videos. Extensive experiments demonstrate the effectiveness of our method both qualitatively and quantitatively.<sup>1</sup>",
"abstracts": [
{
"abstractType": "Regular",
"content": "Despite previous success in generating audio-driven talking heads, most of the previous studies focus on the correlation between speech content and the mouth shape. Facial emotion, which is one of the most important features on natural human faces, is always neglected in their methods. In this work, we present Emotional Video Portraits (EVP), a system for synthesizing high-quality video portraits with vivid emotional dynamics driven by audios. Specifically, we propose the Cross-Reconstructed Emotion Disentanglement technique to decompose speech into two decoupled spaces, i.e., a duration-independent emotion space and a duration- dependent content space. With the disentangled features, dynamic 2D emotional facial landmarks can be deduced. Then we propose the Target-Adaptive Face Synthesis technique to generate the final high-quality video portraits, by bridging the gap between the deduced landmarks and the natural head poses of target videos. Extensive experiments demonstrate the effectiveness of our method both qualitatively and quantitatively.<sup>1</sup>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Despite previous success in generating audio-driven talking heads, most of the previous studies focus on the correlation between speech content and the mouth shape. Facial emotion, which is one of the most important features on natural human faces, is always neglected in their methods. In this work, we present Emotional Video Portraits (EVP), a system for synthesizing high-quality video portraits with vivid emotional dynamics driven by audios. Specifically, we propose the Cross-Reconstructed Emotion Disentanglement technique to decompose speech into two decoupled spaces, i.e., a duration-independent emotion space and a duration- dependent content space. With the disentangled features, dynamic 2D emotional facial landmarks can be deduced. Then we propose the Target-Adaptive Face Synthesis technique to generate the final high-quality video portraits, by bridging the gap between the deduced landmarks and the natural head poses of target videos. Extensive experiments demonstrate the effectiveness of our method both qualitatively and quantitatively.1",
"fno": "450900o4075",
"keywords": [
"Computer Animation",
"Emotion Recognition",
"Face Recognition",
"Speech Processing",
"Video Signal Processing",
"Disentangled Features",
"Dynamic 2 D Emotional Facial Landmarks",
"Target Adaptive Face Synthesis Technique",
"High Quality Video Portraits",
"Natural Head",
"Target Videos",
"Audio Driven Emotional Video Portraits",
"Previous Success",
"Speech Content",
"Mouth Shape",
"Facial Emotion",
"Natural Human Faces",
"Vivid Emotional Dynamics",
"Audios",
"Cross Reconstructed Emotion Disentanglement Technique",
"Decoupled Spaces",
"Duration Independent Emotion Space",
"Duration Dependent Content Space",
"Computer Vision",
"Correlation",
"Shape",
"Heuristic Algorithms",
"Mouth",
"Pattern Recognition",
"Faces"
],
"authors": [
{
"affiliation": "Nanjing University",
"fullName": "Xinya Ji",
"givenName": "Xinya",
"surname": "Ji",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The Chinese University of Hong Kong",
"fullName": "Hang Zhou",
"givenName": "Hang",
"surname": "Zhou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Sydney",
"fullName": "Kaisiyuan Wang",
"givenName": "Kaisiyuan",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "SenseTime Research",
"fullName": "Wayne Wu",
"givenName": "Wayne",
"surname": "Wu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nanyang Technological University,S-Lab",
"fullName": "Chen Change Loy",
"givenName": "Chen Change",
"surname": "Loy",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nanjing University",
"fullName": "Xun Cao",
"givenName": "Xun",
"surname": "Cao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tsinghua University,BNRist and School of Software",
"fullName": "Feng Xu",
"givenName": "Feng",
"surname": "Xu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-06-01T00:00:00",
"pubType": "proceedings",
"pages": "14075-14084",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4509-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1yeIz7AUm5i",
"name": "pcvpr202145090-09578513s1-mm_450900o4075.zip",
"size": "17 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202145090-09578513s1-mm_450900o4075.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "450900o4065",
"articleId": "1yeLh7MRwCk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "450900o4085",
"articleId": "1yeLuazrtTy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/acii/2015/9953/0/07344666",
"title": "Hierarchical modeling of temporal course in emotional expression for speech emotion recognition",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2015/07344666/12OmNBOll55",
"parentPublication": {
"id": "proceedings/acii/2015/9953/0",
"title": "2015 International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2013/0015/0/06607537",
"title": "Using emotional noise to uncloud audio-visual emotion perceptual evaluation",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2013/06607537/12OmNCuDzsI",
"parentPublication": {
"id": "proceedings/icme/2013/0015/0",
"title": "2013 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2008/2570/0/04607572",
"title": "The Vera am Mittag German audio-visual emotional speech database",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607572/12OmNqI04KI",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/re/2006/2555/0/25550299",
"title": "Emotional Requirements in Video Games",
"doi": null,
"abstractUrl": "/proceedings-article/re/2006/25550299/12OmNqIzgYT",
"parentPublication": {
"id": "proceedings/re/2006/2555/0",
"title": "14th IEEE International Requirements Engineering Conference (RE'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2019/01/07945502",
"title": "Audio-Visual Emotion Recognition in Video Clips",
"doi": null,
"abstractUrl": "/journal/ta/2019/01/07945502/13rRUxbCbrS",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2022/7218/0/09859459",
"title": "Emotional Quality Evaluation for Generated Music Based on Emotion Recognition Model",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2022/09859459/1G4F0eWgLmg",
"parentPublication": {
"id": "proceedings/icmew/2022/7218/0",
"title": "2022 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049691",
"title": "Emotional Voice Puppetry",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049691/1KYouSCDkQM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09199560",
"title": "Photorealistic Audio-driven Video Portraits",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09199560/1ncguu1AZdS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccst/2020/8138/0/813800a504",
"title": "A Methond of Building Phoneme-Level Chinese Audio-Visual Emotional Database",
"doi": null,
"abstractUrl": "/proceedings-article/iccst/2020/813800a504/1p1goPOhpPa",
"parentPublication": {
"id": "proceedings/iccst/2020/8138/0",
"title": "2020 International Conference on Culture-oriented Science & Technology (ICCST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2020/8666/0/866600a558",
"title": "An Analysis of Tourism Emotional Portraits by Web Crawler—Taking Guangxi Red Tourism as an Example",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2020/866600a558/1wRItdMlbR6",
"parentPublication": {
"id": "proceedings/icicta/2020/8666/0",
"title": "2020 13th International Conference on Intelligent Computation Technology and Automation (ICICTA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
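The cross-reconstruction idea in the EVP abstract above is compact enough to sketch. Below is a minimal, hypothetical PyTorch rendering of that training signal, not the authors' implementation: the encoder/decoder shapes, feature dimensions, and the fixed-length decoder (which sidesteps EVP's duration-dependent vs. duration-independent modeling) are all assumptions made for illustration.

```python
import torch
import torch.nn as nn

class ClipEncoder(nn.Module):
    """Summarizes an audio-feature clip (B, T, F) into one latent vector."""
    def __init__(self, feat_dim=80, latent_dim=64):
        super().__init__()
        self.rnn = nn.GRU(feat_dim, latent_dim, batch_first=True)

    def forward(self, x):
        _, h = self.rnn(x)
        return h[-1]  # (B, latent_dim)

class ClipDecoder(nn.Module):
    """Decodes a (content, emotion) latent pair back to a fixed-length clip."""
    def __init__(self, latent_dim=64, feat_dim=80, seq_len=50):
        super().__init__()
        self.seq_len, self.feat_dim = seq_len, feat_dim
        self.proj = nn.Linear(2 * latent_dim, seq_len * feat_dim)

    def forward(self, z_content, z_emotion):
        y = self.proj(torch.cat([z_content, z_emotion], dim=-1))
        return y.view(-1, self.seq_len, self.feat_dim)

def cross_reconstruction_loss(enc_c, enc_e, dec, x_a1, x_b2, x_a2, x_b1):
    """x_{s,e} = sentence s spoken in emotion e. Decoding sentence A's content
    latent with clip B's emotion latent must reproduce x_a2 (and vice versa),
    which pushes content and emotion into separate latent spaces."""
    zc_a, ze_1 = enc_c(x_a1), enc_e(x_a1)
    zc_b, ze_2 = enc_c(x_b2), enc_e(x_b2)
    mse = nn.functional.mse_loss
    return mse(dec(zc_a, ze_2), x_a2) + mse(dec(zc_b, ze_1), x_b1)

# usage on random stand-in features
enc_c, enc_e, dec = ClipEncoder(), ClipEncoder(), ClipDecoder()
clips = [torch.randn(4, 50, 80) for _ in range(4)]  # x_a1, x_b2, x_a2, x_b1
cross_reconstruction_loss(enc_c, enc_e, dec, *clips).backward()
```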
{
"proceeding": {
"id": "12OmNvmowTj",
"title": "First Canadian Conference on Computer and Robot Vision, 2004. Proceedings.",
"acronym": "cccrv",
"groupId": "1001794",
"volume": "0",
"displayVolume": "0",
"year": "2004",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAH5dms",
"doi": "10.1109/CCCRV.2004.1301486",
"title": "Estimating camera motion through a 3D cluttered scene",
"normalizedTitle": "Estimating camera motion through a 3D cluttered scene",
"abstract": "Previous methods for estimating the motion of an observer through a static scene require that image velocities can be measured. For the case of motion through a cluttered 3D scene, however, measuring optical flow is problematic because of the high density of depth discontinuities. This paper introduces a method for estimating motion through a cluttered 3D scene that does not measure velocities at individual points. Instead the method measures a distribution of velocities over local image regions. We show that motion through a cluttered scene produces a bowtie pattern in the power spectra of local image regions. We show how to estimate the parameters of the bowtie for different image regions and how to use these parameters to estimate observer motion. We demonstrate our method on synthetic and real data sequences.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Previous methods for estimating the motion of an observer through a static scene require that image velocities can be measured. For the case of motion through a cluttered 3D scene, however, measuring optical flow is problematic because of the high density of depth discontinuities. This paper introduces a method for estimating motion through a cluttered 3D scene that does not measure velocities at individual points. Instead the method measures a distribution of velocities over local image regions. We show that motion through a cluttered scene produces a bowtie pattern in the power spectra of local image regions. We show how to estimate the parameters of the bowtie for different image regions and how to use these parameters to estimate observer motion. We demonstrate our method on synthetic and real data sequences.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Previous methods for estimating the motion of an observer through a static scene require that image velocities can be measured. For the case of motion through a cluttered 3D scene, however, measuring optical flow is problematic because of the high density of depth discontinuities. This paper introduces a method for estimating motion through a cluttered 3D scene that does not measure velocities at individual points. Instead the method measures a distribution of velocities over local image regions. We show that motion through a cluttered scene produces a bowtie pattern in the power spectra of local image regions. We show how to estimate the parameters of the bowtie for different image regions and how to use these parameters to estimate observer motion. We demonstrate our method on synthetic and real data sequences.",
"fno": "01301486",
"keywords": [
"Motion Estimation",
"Cameras",
"Layout",
"Image Motion Analysis",
"Snow",
"Motion Measurement",
"Velocity Measurement",
"Computer Science",
"Parameter Estimation",
"Computer Vision"
],
"authors": [],
"idPrefix": "cccrv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2004-06-01T00:00:00",
"pubType": "proceedings",
"pages": "472-479",
"year": "2004",
"issn": null,
"isbn": "0-7695-2127-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "01301485",
"articleId": "12OmNzkuKzw",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "01301487",
"articleId": "12OmNxj239U",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2001/1143/1/00937513",
"title": "Multiple motion scene reconstruction from uncalibrated views",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2001/00937513/12OmNBqdrgc",
"parentPublication": {
"id": "proceedings/iccv/2001/1143/1",
"title": "Computer Vision, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2009/4442/0/05457631",
"title": "An iterative scheme for motion-based scene segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2009/05457631/12OmNBscCTE",
"parentPublication": {
"id": "proceedings/iccvw/2009/4442/0",
"title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wvm/1991/2153/0/00212779",
"title": "A fast subspace algorithm for recovering rigid motion",
"doi": null,
"abstractUrl": "/proceedings-article/wvm/1991/00212779/12OmNwD1q0y",
"parentPublication": {
"id": "proceedings/wvm/1991/2153/0",
"title": "Proceedings of the IEEE Workshop on Visual Motion",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2011/0394/0/05995465",
"title": "Estimating Motion and size of moving non-line-of-sight objects in cluttered environments",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995465/12OmNwHhoM9",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/06977445",
"title": "Estimating Floor Regions in Cluttered Indoor Scenes from First Person Camera View",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/06977445/12OmNy50gfd",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvmp/2010/4268/0/4268a009",
"title": "Camera Motion Style Transfer",
"doi": null,
"abstractUrl": "/proceedings-article/cvmp/2010/4268a009/12OmNyPQ4O1",
"parentPublication": {
"id": "proceedings/cvmp/2010/4268/0",
"title": "2010 Conference on Visual Media Production",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1992/2910/0/00201674",
"title": "On estimating a robot's motion from laser range measurements using the distance transform",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1992/00201674/12OmNyTfg9p",
"parentPublication": {
"id": "proceedings/icpr/1992/2910/0",
"title": "1992 11th IAPR International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1991/2163/0/00131867",
"title": "Model based maglev microrobotic motion control",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1991/00131867/12OmNznCl22",
"parentPublication": {
"id": "proceedings/robot/1991/2163/0",
"title": "Proceedings. 1991 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2015/8332/0/8332a064",
"title": "Motion Cooperation: Smooth Piece-wise Rigid Scene Flow from RGB-D Images",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2015/8332a064/12OmNzyYibr",
"parentPublication": {
"id": "proceedings/3dv/2015/8332/0",
"title": "2015 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2003/07/i0884",
"title": "Multiple Motion Scene Reconstruction with Uncalibrated Cameras",
"doi": null,
"abstractUrl": "/journal/tp/2003/07/i0884/13rRUxlgxXt",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
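The estimator described in the abstract above works in the frequency domain of local space-time blocks. As a hedged illustration of the fact it builds on, namely that a texture translating at velocity (v_x, v_y) concentrates its 3D power spectrum on the plane f_t = -(v_x f_x + v_y f_y), the NumPy sketch below fits the single dominant plane by power-weighted least squares. The paper's bowtie model generalizes this to a whole spread of such planes; this toy reconstruction handles only the degenerate single-velocity case and is not the authors' estimator.

```python
import numpy as np

def local_velocity_from_spectrum(block):
    """block: (T, H, W) space-time image patch; returns (v_x, v_y) px/frame."""
    P = np.abs(np.fft.fftn(block)) ** 2
    ft, fy, fx = np.meshgrid(np.fft.fftfreq(block.shape[0]),
                             np.fft.fftfreq(block.shape[1]),
                             np.fft.fftfreq(block.shape[2]), indexing="ij")
    # keep low spatial frequencies so the motion plane is not temporally aliased
    w = (P * ((np.abs(fx) + np.abs(fy)) < 0.4)).ravel()
    A = np.stack([fx.ravel(), fy.ravel()], axis=1)
    b = -ft.ravel()
    # power-weighted least squares for f_t + v_x f_x + v_y f_y = 0
    return np.linalg.solve(A.T @ (A * w[:, None]), A.T @ (b * w))

# synthetic check: a white-noise texture translating at (1, 1) px/frame
rng = np.random.default_rng(0)
frame = rng.standard_normal((64, 64))
clip = np.stack([np.roll(frame, (t, t), axis=(0, 1)) for t in range(16)])
print(local_velocity_from_spectrum(clip))  # approximately [1.0, 1.0]
```

A cluttered scene with many depth layers smears this plane into the bowtie pattern the paper describes, which is exactly where a single-plane fit stops being sufficient.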
{
"proceeding": {
"id": "12OmNylborE",
"title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"acronym": "wacv",
"groupId": "1000040",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBA9oAz",
"doi": "10.1109/WACV.2018.00121",
"title": "SceneFlowFields: Dense Interpolation of Sparse Scene Flow Correspondences",
"normalizedTitle": "SceneFlowFields: Dense Interpolation of Sparse Scene Flow Correspondences",
"abstract": "While most scene flow methods use either variational optimization or a strong rigid motion assumption, we show for the first time that scene flow can also be estimated by dense interpolation of sparse matches. To this end, we find sparse matches across two stereo image pairs that are detected without any prior regularization and perform dense interpolation preserving geometric and motion boundaries by using edge information. A few iterations of variational energy minimization are performed to refine our results, which are thoroughly evaluated on the KITTI benchmark and additionally compared to state-of-the-art on MPI Sintel. For application in an automotive context, we further show that an optional ego-motion model helps to boost performance and blends smoothly into our approach to produce a segmentation of the scene into static and dynamic parts.",
"abstracts": [
{
"abstractType": "Regular",
"content": "While most scene flow methods use either variational optimization or a strong rigid motion assumption, we show for the first time that scene flow can also be estimated by dense interpolation of sparse matches. To this end, we find sparse matches across two stereo image pairs that are detected without any prior regularization and perform dense interpolation preserving geometric and motion boundaries by using edge information. A few iterations of variational energy minimization are performed to refine our results, which are thoroughly evaluated on the KITTI benchmark and additionally compared to state-of-the-art on MPI Sintel. For application in an automotive context, we further show that an optional ego-motion model helps to boost performance and blends smoothly into our approach to produce a segmentation of the scene into static and dynamic parts.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "While most scene flow methods use either variational optimization or a strong rigid motion assumption, we show for the first time that scene flow can also be estimated by dense interpolation of sparse matches. To this end, we find sparse matches across two stereo image pairs that are detected without any prior regularization and perform dense interpolation preserving geometric and motion boundaries by using edge information. A few iterations of variational energy minimization are performed to refine our results, which are thoroughly evaluated on the KITTI benchmark and additionally compared to state-of-the-art on MPI Sintel. For application in an automotive context, we further show that an optional ego-motion model helps to boost performance and blends smoothly into our approach to produce a segmentation of the scene into static and dynamic parts.",
"fno": "488601b056",
"keywords": [
"Image Matching",
"Image Motion Analysis",
"Image Segmentation",
"Image Sequences",
"Interpolation",
"Iterative Methods",
"Minimisation",
"Motion Estimation",
"Object Detection",
"Stereo Image Processing",
"Scene Flow Methods",
"Variational Optimization",
"Strong Rigid Motion Assumption",
"Dense Interpolation",
"Sparse Matches",
"Stereo Image Pairs",
"Variational Energy Minimization",
"Optional Ego Motion Model",
"Scene Flow Fields",
"Sparse Scene Flow Correspondences",
"Geometric Boundaries",
"Motion Boundaries",
"Edge Information",
"KITTI Benchmark",
"Scene Segmentation",
"Three Dimensional Displays",
"Motion Segmentation",
"Interpolation",
"Optical Imaging",
"Geometry",
"Image Edge Detection",
"Benchmark Testing"
],
"authors": [
{
"affiliation": null,
"fullName": "Rene Schuster",
"givenName": "Rene",
"surname": "Schuster",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Oliver Wasenmuller",
"givenName": "Oliver",
"surname": "Wasenmuller",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Georg Kuschk",
"givenName": "Georg",
"surname": "Kuschk",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Christian Bailer",
"givenName": "Christian",
"surname": "Bailer",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Didier Stricker",
"givenName": "Didier",
"surname": "Stricker",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wacv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1056-1065",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-4886-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "488601b047",
"articleId": "12OmNqBtiEx",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "488601b066",
"articleId": "12OmNz61dIy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvprw/2014/4308/0/4308a138",
"title": "Dense View Interpolation on Mobile Devices Using Focal Stacks",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2014/4308a138/12OmNAWH9Je",
"parentPublication": {
"id": "proceedings/cvprw/2014/4308/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2017/2610/0/261001a225",
"title": "Cascaded Scene Flow Prediction Using Semantic Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2017/261001a225/12OmNAolH6X",
"parentPublication": {
"id": "proceedings/3dv/2017/2610/0",
"title": "2017 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2015/6964/0/07298720",
"title": "EpicFlow: Edge-preserving interpolation of correspondences for optical flow",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2015/07298720/12OmNArthdN",
"parentPublication": {
"id": "proceedings/cvpr/2015/6964/0",
"title": "2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2011/348/0/06011999",
"title": "Edge-oriented interpolation for fractional motion estimation in hybrid video coding",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2011/06011999/12OmNqIhFZE",
"parentPublication": {
"id": "proceedings/icme/2011/348/0",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acv/1992/2840/0/00240329",
"title": "Interpolation of cinematic sequences",
"doi": null,
"abstractUrl": "/proceedings-article/acv/1992/00240329/12OmNvjgWTB",
"parentPublication": {
"id": "proceedings/acv/1992/2840/0",
"title": "Proceedings IEEE Workshop on Applications of Computer Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2004/2244/0/01410464",
"title": "Real-time environment map interpolation",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2004/01410464/12OmNx8OuBg",
"parentPublication": {
"id": "proceedings/icig/2004/2244/0",
"title": "Proceedings. Third International Conference on Image and Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457g363",
"title": "InterpoNet, a Brain Inspired Neural Network for Optical Flow Dense Interpolation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457g363/12OmNyprnpB",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icctec/2017/5784/0/578400b230",
"title": "Frame Rate Conversion Based on Scene Change Detection",
"doi": null,
"abstractUrl": "/proceedings-article/icctec/2017/578400b230/1cks7pe9Bhm",
"parentPublication": {
"id": "proceedings/icctec/2017/5784/0",
"title": "2017 International Conference on Computer Technology, Electronics and Communication (ICCTEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2021/03/08840983",
"title": "MEMC-Net: Motion Estimation and Motion Compensation Driven Neural Network for Video Interpolation and Enhancement",
"doi": null,
"abstractUrl": "/journal/tp/2021/03/08840983/1doNwUnSB0I",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2021/0477/0/047700a197",
"title": "SSGP: Sparse Spatial Guided Propagation for Robust and Generic Interpolation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2021/047700a197/1uqGkuuB73O",
"parentPublication": {
"id": "proceedings/wacv/2021/0477/0",
"title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
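SceneFlowFields' central move, densifying sparse correspondences by interpolation, can be illustrated with a deliberately simplified interpolator. The sketch below does inverse-distance weighting over the k nearest matches using SciPy's cKDTree; the paper's interpolation is edge-preserving, so the plain Euclidean distance here is only a stand-in for its boundary-aware distances, and all names and parameters are illustrative.

```python
import numpy as np
from scipy.spatial import cKDTree

def densify_sparse_flow(pts, flows, shape, k=4, eps=1e-6):
    """pts: (N, 2) match locations (x, y); flows: (N, 2) flow vectors at those
    locations; shape: (H, W). Returns an (H, W, 2) dense flow field."""
    H, W = shape
    ys, xs = np.mgrid[0:H, 0:W]
    queries = np.stack([xs.ravel(), ys.ravel()], axis=1).astype(float)
    d, idx = cKDTree(pts).query(queries, k=k)  # k nearest matches per pixel
    wgt = 1.0 / (d + eps)                      # inverse-distance weights
    wgt /= wgt.sum(axis=1, keepdims=True)
    dense = (flows[idx] * wgt[..., None]).sum(axis=1)
    return dense.reshape(H, W, 2)

# toy usage: four corner matches interpolated over a 32x32 image
pts = np.array([[4.0, 4.0], [28.0, 4.0], [4.0, 28.0], [28.0, 28.0]])
flows = np.array([[1.0, 0.0], [0.0, 1.0], [-1.0, 0.0], [0.0, -1.0]])
dense = densify_sparse_flow(pts, flows, (32, 32))
```

Replacing the Euclidean metric with a geodesic distance that respects image edges is what keeps geometric and motion boundaries sharp in the paper's setting.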
{
"proceeding": {
"id": "12OmNxwWorE",
"title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops",
"acronym": "iccvw",
"groupId": "1800041",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyrqzxZ",
"doi": "10.1109/ICCVW.2009.5457589",
"title": "3D model-based marker-less human motion tracking in cluttered environment",
"normalizedTitle": "3D model-based marker-less human motion tracking in cluttered environment",
"abstract": "We propose a novel 3D model-based framework for tracking 3D human motion in cluttered environment through an animatable 3D geometrical human model that resembles the subject, and which is textured with its real appearance color. Our computation synthesizes the 3D posture that minimizes the difference between the image of the synthesized movement and the real image via a numerical minimization kernel. The ill-posed problems in existing methods that heavily rely on standard image segmentation such as the background subtraction are overcome with our approach. Also in order to produce better 3D geometrical model for tracking, we proposed a three-filter set for large improvements on surface distortions of a low-cost human reconstruction method. Our results demonstrate that our method is able to cope with clutters and occlusions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a novel 3D model-based framework for tracking 3D human motion in cluttered environment through an animatable 3D geometrical human model that resembles the subject, and which is textured with its real appearance color. Our computation synthesizes the 3D posture that minimizes the difference between the image of the synthesized movement and the real image via a numerical minimization kernel. The ill-posed problems in existing methods that heavily rely on standard image segmentation such as the background subtraction are overcome with our approach. Also in order to produce better 3D geometrical model for tracking, we proposed a three-filter set for large improvements on surface distortions of a low-cost human reconstruction method. Our results demonstrate that our method is able to cope with clutters and occlusions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a novel 3D model-based framework for tracking 3D human motion in cluttered environment through an animatable 3D geometrical human model that resembles the subject, and which is textured with its real appearance color. Our computation synthesizes the 3D posture that minimizes the difference between the image of the synthesized movement and the real image via a numerical minimization kernel. The ill-posed problems in existing methods that heavily rely on standard image segmentation such as the background subtraction are overcome with our approach. Also in order to produce better 3D geometrical model for tracking, we proposed a three-filter set for large improvements on surface distortions of a low-cost human reconstruction method. Our results demonstrate that our method is able to cope with clutters and occlusions.",
"fno": "05457589",
"keywords": [
"Computer Animation",
"Filtering Theory",
"Geometry",
"Image Colour Analysis",
"Image Motion Analysis",
"Image Reconstruction",
"Image Segmentation",
"Image Texture",
"Optical Tracking",
"Solid Modelling",
"3 D Model Based Markerless Human Motion Tracking",
"Cluttered Environment",
"Animatable 3 D Geometrical Human Model",
"Image Texture",
"Real Appearance Color",
"3 D Posture",
"Numerical Minimization Kernel",
"Image Segmentation",
"Background Subtraction",
"Three Filter Set",
"Surface Distortion",
"Low Cost Human Reconstruction",
"Humans",
"Tracking",
"Image Segmentation",
"Solid Modeling",
"Cameras",
"Biological System Modeling",
"Animation",
"Kinematics",
"Performance Analysis",
"Surveillance"
],
"authors": [
{
"affiliation": "INRIA, Domaine de Voluceau, BP105 78153 Le Chesnay, France",
"fullName": "Andre Gagalowicz",
"givenName": "Andre",
"surname": "Gagalowicz",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Republic Polytechnics, Singapore",
"fullName": "Chee Kwang Quah",
"givenName": "Chee Kwang",
"surname": "Quah",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccvw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-09-01T00:00:00",
"pubType": "proceedings",
"pages": "1042-1049",
"year": "2009",
"issn": null,
"isbn": "978-1-4244-4442-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05457588",
"articleId": "12OmNCfAPEu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05457586",
"articleId": "12OmNx38vMd",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/mnrao/1994/6435/0/00346263",
"title": "A system for human motion matching between synthetic and real images based on a biomechanic graphical model",
"doi": null,
"abstractUrl": "/proceedings-article/mnrao/1994/00346263/12OmNAWH9Dj",
"parentPublication": {
"id": "proceedings/mnrao/1994/6435/0",
"title": "Proceedings of 1994 IEEE Workshop on Motion of Non-rigid and Articulated Objects",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2017/2610/0/261001a421",
"title": "Towards Accurate Marker-Less Human Shape and Pose Estimation over Time",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2017/261001a421/12OmNBSjIT9",
"parentPublication": {
"id": "proceedings/3dv/2017/2610/0",
"title": "2017 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2007/1179/0/04270321",
"title": "Marker-less Deformable Mesh Tracking for Human Shape and Motion Capture",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2007/04270321/12OmNBU1jHa",
"parentPublication": {
"id": "proceedings/cvpr/2007/1179/0",
"title": "2007 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iita/2008/3497/2/3497b263",
"title": "Vision-Based Human Motion Analysis for Event Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/iita/2008/3497b263/12OmNBscD24",
"parentPublication": {
"id": "iita/2008/3497/2",
"title": "2008 Second International Symposium on Intelligent Information Technology Application",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2008/2153/0/04813309",
"title": "Complex human motion estimation using visibility",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2008/04813309/12OmNxT56Bl",
"parentPublication": {
"id": "proceedings/fg/2008/2153/0",
"title": "2008 8th IEEE International Conference on Automatic Face & Gesture Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictai/2005/2488/0/24880621",
"title": "Using Interval Particle Filtering for Marker Less 3D Human Motion Capture",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2005/24880621/12OmNykCcbv",
"parentPublication": {
"id": "proceedings/ictai/2005/2488/0",
"title": "17th IEEE International Conference on Tools with Artificial Intelligence (ICTAI'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2011/9140/0/05771381",
"title": "The human motion database: A cognitive and parametric sampling of human motion",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2011/05771381/12OmNyyO8NC",
"parentPublication": {
"id": "proceedings/fg/2011/9140/0",
"title": "Face and Gesture 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457b253",
"title": "Harvesting Multiple Views for Marker-Less 3D Human Pose Annotations",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457b253/12OmNzvz6Fs",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2004/8603/1/01394212",
"title": "Reanimating real humans: automatic reconstruction of animated faces from range data",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2004/01394212/12OmNzzP5JA",
"parentPublication": {
"id": "proceedings/icme/2004/8603/1",
"title": "2004 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2013/01/ttp2013010052",
"title": "Dynamical Simulation Priors for Human Motion Tracking",
"doi": null,
"abstractUrl": "/journal/tp/2013/01/ttp2013010052/13rRUxCitzI",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
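The tracker above is an analysis-by-synthesis loop: render the textured model at a candidate posture, measure the difference against the real image, and let a numerical minimizer update the posture. A minimal 2D rigid stand-in (template translation plus rotation instead of an articulated 3D body) shows the structure of that loop; it is a sketch under those simplifying assumptions, not the paper's pipeline.

```python
import numpy as np
from scipy import ndimage, optimize

def synthesize(template, pose):
    """Render the 'model' at pose = (tx, ty, theta in radians)."""
    tx, ty, theta = pose
    rotated = ndimage.rotate(template, np.degrees(theta), reshape=False, order=1)
    return ndimage.shift(rotated, (ty, tx), order=1)

def fit_pose(template, observed, x0=(0.0, 0.0, 0.0)):
    """Minimize the synthesized-vs-observed image difference over the pose."""
    sse = lambda p: float(np.sum((synthesize(template, p) - observed) ** 2))
    return optimize.minimize(sse, x0, method="Powell").x

# toy usage: recover a known shift and rotation of a bright square
model = np.zeros((64, 64))
model[24:40, 24:40] = 1.0
observed = synthesize(model, (3.0, -2.0, 0.15))
print(fit_pose(model, observed))  # approximately [3.0, -2.0, 0.15]
```

A derivative-free method is used here because the rendering step is not smoothly differentiable; the same consideration shapes the choice of minimization kernel in model-based trackers generally.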
{
"proceeding": {
"id": "12OmNBDyAaZ",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzmclRq",
"doi": "10.1109/ICCV.2015.281",
"title": "Dense Optical Flow Prediction from a Static Image",
"normalizedTitle": "Dense Optical Flow Prediction from a Static Image",
"abstract": "Given a scene, what is going to move, and in what direction will it move? Such a question could be considered a non-semantic form of action prediction. In this work, we present a convolutional neural network (CNN) based approach for motion prediction. Given a static image, this CNN predicts the future motion of each and every pixel in the image in terms of optical flow. Our CNN model leverages the data in tens of thousands of realistic videos to train our model. Our method relies on absolutely no human labeling and is able to predict motion based on the context of the scene. Because our CNN model makes no assumptions about the underlying scene, it can predict future optical flow on a diverse set of scenarios. We outperform all previous approaches by large margins.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Given a scene, what is going to move, and in what direction will it move? Such a question could be considered a non-semantic form of action prediction. In this work, we present a convolutional neural network (CNN) based approach for motion prediction. Given a static image, this CNN predicts the future motion of each and every pixel in the image in terms of optical flow. Our CNN model leverages the data in tens of thousands of realistic videos to train our model. Our method relies on absolutely no human labeling and is able to predict motion based on the context of the scene. Because our CNN model makes no assumptions about the underlying scene, it can predict future optical flow on a diverse set of scenarios. We outperform all previous approaches by large margins.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Given a scene, what is going to move, and in what direction will it move? Such a question could be considered a non-semantic form of action prediction. In this work, we present a convolutional neural network (CNN) based approach for motion prediction. Given a static image, this CNN predicts the future motion of each and every pixel in the image in terms of optical flow. Our CNN model leverages the data in tens of thousands of realistic videos to train our model. Our method relies on absolutely no human labeling and is able to predict motion based on the context of the scene. Because our CNN model makes no assumptions about the underlying scene, it can predict future optical flow on a diverse set of scenarios. We outperform all previous approaches by large margins.",
"fno": "8391c443",
"keywords": [
"Optical Imaging",
"Videos",
"Predictive Models",
"Optical Losses",
"Neural Networks",
"Context",
"Trajectory"
],
"authors": [
{
"affiliation": null,
"fullName": "Jacob Walker",
"givenName": "Jacob",
"surname": "Walker",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Abhinav Gupta",
"givenName": "Abhinav",
"surname": "Gupta",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Martial Hebert",
"givenName": "Martial",
"surname": "Hebert",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-12-01T00:00:00",
"pubType": "proceedings",
"pages": "2443-2451",
"year": "2015",
"issn": "2380-7504",
"isbn": "978-1-4673-8391-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "8391c434",
"articleId": "12OmNzSyC9r",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "8391c452",
"articleId": "12OmNzvhvw6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2016/8851/0/8851d889",
"title": "Optical Flow with Semantic Segmentation and Localized Layers",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851d889/12OmNzyGH2R",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000e884",
"title": "Occlusion Aware Unsupervised Learning of Optical Flow",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000e884/17D45WWzW7b",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2022/04/09854154",
"title": "Self-Supervised Approach for Facial Movement Based Optical Flow",
"doi": null,
"abstractUrl": "/journal/ta/2022/04/09854154/1FJ0D31U1X2",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2019/9552/0/955200a181",
"title": "Unsupervised Learning for Optical Flow Estimation Using Pyramid Convolution LSTM",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2019/955200a181/1cdOEr4ugfK",
"parentPublication": {
"id": "proceedings/icme/2019/9552/0",
"title": "2019 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2019/9552/0/955200b768",
"title": "Continuous Bidirectional Optical Flow for Video Frame Sequence Interpolation",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2019/955200b768/1cdOHANc2RO",
"parentPublication": {
"id": "proceedings/icme/2019/9552/0",
"title": "2019 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300c404",
"title": "Attacking Optical Flow",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300c404/1hQqpNiKLuM",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800e897",
"title": "VOLDOR: Visual Odometry From Log-Logistic Dense Optical Flow Residuals",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800e897/1m3nEnPEl0s",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800h394",
"title": "Self-Supervised Monocular Scene Flow Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800h394/1m3odcpYzoQ",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800b331",
"title": "Upgrading Optical Flow to 3D Scene Flow Through Optical Expansion",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800b331/1m3ooB64tNe",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2020/8128/0/812800a682",
"title": "Joint Unsupervised Learning of Optical Flow and Egomotion with Bi-Level optimization",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2020/812800a682/1qyxmqwLJcs",
"parentPublication": {
"id": "proceedings/3dv/2020/8128/0",
"title": "2020 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
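The network described above maps a single frame to a per-pixel motion estimate. A tiny fully convolutional stand-in is sketched below in PyTorch; note the actual paper frames prediction as classification over quantized flow, whereas this sketch regresses flow directly for brevity, and every layer size here is an invented assumption.

```python
import torch
import torch.nn as nn

class FlowFromStill(nn.Module):
    """Maps one RGB frame (B, 3, H, W) to a per-pixel flow field (B, 2, H, W)."""
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(3, 32, 3, padding=1), nn.ReLU(),
            nn.Conv2d(32, 64, 3, stride=2, padding=1), nn.ReLU(),
            nn.Conv2d(64, 64, 3, padding=1), nn.ReLU(),
            nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False),
            nn.Conv2d(64, 2, 3, padding=1),  # (u, v) per pixel
        )

    def forward(self, img):
        return self.net(img)

# the supervision needs no human labels: optical flow computed between
# consecutive frames of unlabeled video serves as the regression target
model = FlowFromStill()
img = torch.randn(4, 3, 64, 64)
target_flow = torch.randn(4, 2, 64, 64)  # stand-in for flow from video
loss = nn.functional.mse_loss(model(img), target_flow)
loss.backward()
```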
{
"proceeding": {
"id": "1KxUhhFgzlK",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"acronym": "wacv",
"groupId": "1000040",
"volume": "0",
"displayVolume": "0",
"year": "2023",
"__typename": "ProceedingType"
},
"article": {
"id": "1L8qEyMSGu4",
"doi": "10.1109/WACV56688.2023.00038",
"title": "Placing Human Animations into 3D Scenes by Learning Interaction- and Geometry-Driven Keyframes",
"normalizedTitle": "Placing Human Animations into 3D Scenes by Learning Interaction- and Geometry-Driven Keyframes",
"abstract": "We present a novel method for placing a 3D human animation into a 3D scene while maintaining any human-scene interactions in the animation. We use the notion of computing the most important meshes in the animation for the interaction with the scene, which we call \"keyframes.\" These keyframes allow us to better optimize the placement of the animation into the scene such that interactions in the animations (standing, laying, sitting, etc.) match the affordances ofthe scene (e.g., standing on the floor or laying in a bed). We compare our method, which we call PAAK, with prior approaches, including POSA, PROX ground truth, and a motion synthesis method, and highlight the benefits of our method with a perceptual study. Human raters preferred our PAAK method over the PROX ground truth data 64.6% of the time. Additionally, in direct comparisons, the raters preferred PAAK over competing methods including 61.5% compared to POSA. Our project website is available at https://gamma.umd.edu/paak/.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a novel method for placing a 3D human animation into a 3D scene while maintaining any human-scene interactions in the animation. We use the notion of computing the most important meshes in the animation for the interaction with the scene, which we call \"keyframes.\" These keyframes allow us to better optimize the placement of the animation into the scene such that interactions in the animations (standing, laying, sitting, etc.) match the affordances ofthe scene (e.g., standing on the floor or laying in a bed). We compare our method, which we call PAAK, with prior approaches, including POSA, PROX ground truth, and a motion synthesis method, and highlight the benefits of our method with a perceptual study. Human raters preferred our PAAK method over the PROX ground truth data 64.6% of the time. Additionally, in direct comparisons, the raters preferred PAAK over competing methods including 61.5% compared to POSA. Our project website is available at https://gamma.umd.edu/paak/.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a novel method for placing a 3D human animation into a 3D scene while maintaining any human-scene interactions in the animation. We use the notion of computing the most important meshes in the animation for the interaction with the scene, which we call \"keyframes.\" These keyframes allow us to better optimize the placement of the animation into the scene such that interactions in the animations (standing, laying, sitting, etc.) match the affordances ofthe scene (e.g., standing on the floor or laying in a bed). We compare our method, which we call PAAK, with prior approaches, including POSA, PROX ground truth, and a motion synthesis method, and highlight the benefits of our method with a perceptual study. Human raters preferred our PAAK method over the PROX ground truth data 64.6% of the time. Additionally, in direct comparisons, the raters preferred PAAK over competing methods including 61.5% compared to POSA. Our project website is available at https://gamma.umd.edu/paak/.",
"fno": "934600a300",
"keywords": [
"Computer Animation",
"Learning Artificial Intelligence",
"Mesh Generation",
"3 D Human Animation",
"3 D Scenes",
"Geometry Driven Keyframes",
"Human Animations",
"Human Raters",
"Human Scene Interactions",
"Learning Interaction",
"Motion Synthesis Method",
"PAAK Method",
"POSA",
"PROX Ground Truth Data",
"Computer Vision",
"Three Dimensional Displays",
"Affordances",
"Animation",
"Floors",
"Algorithms Computational Photography",
"Image And Video Synthesis",
"Arts Games Social Media",
"Virtual Augmented Reality"
],
"authors": [
{
"affiliation": "University of Maryland",
"fullName": "James F. Mullen",
"givenName": "James F.",
"surname": "Mullen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Maryland",
"fullName": "Divya Kothandaraman",
"givenName": "Divya",
"surname": "Kothandaraman",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Purdue University",
"fullName": "Aniket Bera",
"givenName": "Aniket",
"surname": "Bera",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Maryland",
"fullName": "Dinesh Manocha",
"givenName": "Dinesh",
"surname": "Manocha",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wacv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2023-01-01T00:00:00",
"pubType": "proceedings",
"pages": "300-310",
"year": "2023",
"issn": null,
"isbn": "978-1-6654-9346-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "934600a289",
"articleId": "1L6LztNgYbS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "934600a311",
"articleId": "1KxUuwm9bWM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2013/6097/0/06550227",
"title": "Poster: Puppetooner: A puppet-based system to interconnect real and virtual spaces for 3D animations",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2013/06550227/12OmNBcShSg",
"parentPublication": {
"id": "proceedings/3dui/2013/6097/0",
"title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2013/3022/0/3022a546",
"title": "Behind the Scenes: What Moving Targets Reveal about Static Scene Geometry",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2013/3022a546/12OmNC0PGLN",
"parentPublication": {
"id": "proceedings/iccvw/2013/3022/0",
"title": "2013 IEEE International Conference on Computer Vision Workshops (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/03/v0562",
"title": "High Resolution Animated Scenes from Stills",
"doi": null,
"abstractUrl": "/journal/tg/2007/03/v0562/13rRUNvya9g",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/10/07636982",
"title": "Enriching Triangle Mesh Animations with Physically Based Simulation",
"doi": null,
"abstractUrl": "/journal/tg/2017/10/07636982/13rRUxcbnHh",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049652",
"title": "PACE: Data-Driven Virtual Agent Interaction in Dense and Cluttered Environments",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049652/1KYoxzkht3W",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798288",
"title": "A Real-Time Music VR System for 3D External and Internal Articulators",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798288/1cJ1fOEaYRa",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2019/2506/0/250600a964",
"title": "Online Reconstruction of Indoor Scenes With Local Manhattan Frame Growing",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2019/250600a964/1iTvpwOaZ68",
"parentPublication": {
"id": "proceedings/cvprw/2019/2506/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icris/2020/1969/0/196900a665",
"title": "Dynamic Modeling of Interactive Scene in 3D Animation Teaching",
"doi": null,
"abstractUrl": "/proceedings-article/icris/2020/196900a665/1wG5Vm0v6IU",
"parentPublication": {
"id": "proceedings/icris/2020/1969/0",
"title": "2020 International Conference on Robots & Intelligent System (ICRIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900o4703",
"title": "Populating 3D Scenes by Learning Human-Scene Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900o4703/1yeHZ17Dp3a",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900j396",
"title": "Synthesizing Long-Term 3D Human Motion and Interaction in 3D Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900j396/1yeLL8zHwic",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1yeHGyRsuys",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yeLL8zHwic",
"doi": "10.1109/CVPR46437.2021.00928",
"title": "Synthesizing Long-Term 3D Human Motion and Interaction in 3D Scenes",
"normalizedTitle": "Synthesizing Long-Term 3D Human Motion and Interaction in 3D Scenes",
"abstract": "Synthesizing 3D human motion plays an important role in many graphics applications as well as understanding human activity. While many efforts have been made on generating realistic and natural human motion, most approaches neglect the importance of modeling human-scene interactions and affordance. On the other hand, affordance reasoning (e.g., standing on the floor or sitting on the chair) has mainly been studied with static human pose and gestures, and it has rarely been addressed with human motion. In this paper, we propose to bridge human motion synthesis and scene affordance reasoning. We present a hierarchical generative framework to synthesize long-term 3D human motion conditioning on the 3D scene structure. Building on this framework, we further enforce multiple geometry constraints between the human mesh and scene point clouds via optimization to improve realistic synthesis. Our experiments show significant improvements over previous approaches on generating natural and physically plausible human motion in a scene.<sup>1</sup>",
"abstracts": [
{
"abstractType": "Regular",
"content": "Synthesizing 3D human motion plays an important role in many graphics applications as well as understanding human activity. While many efforts have been made on generating realistic and natural human motion, most approaches neglect the importance of modeling human-scene interactions and affordance. On the other hand, affordance reasoning (e.g., standing on the floor or sitting on the chair) has mainly been studied with static human pose and gestures, and it has rarely been addressed with human motion. In this paper, we propose to bridge human motion synthesis and scene affordance reasoning. We present a hierarchical generative framework to synthesize long-term 3D human motion conditioning on the 3D scene structure. Building on this framework, we further enforce multiple geometry constraints between the human mesh and scene point clouds via optimization to improve realistic synthesis. Our experiments show significant improvements over previous approaches on generating natural and physically plausible human motion in a scene.<sup>1</sup>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Synthesizing 3D human motion plays an important role in many graphics applications as well as understanding human activity. While many efforts have been made on generating realistic and natural human motion, most approaches neglect the importance of modeling human-scene interactions and affordance. On the other hand, affordance reasoning (e.g., standing on the floor or sitting on the chair) has mainly been studied with static human pose and gestures, and it has rarely been addressed with human motion. In this paper, we propose to bridge human motion synthesis and scene affordance reasoning. We present a hierarchical generative framework to synthesize long-term 3D human motion conditioning on the 3D scene structure. Building on this framework, we further enforce multiple geometry constraints between the human mesh and scene point clouds via optimization to improve realistic synthesis. Our experiments show significant improvements over previous approaches on generating natural and physically plausible human motion in a scene.1",
"fno": "450900j396",
"keywords": [
"Computer Graphics",
"3 D Scene Structure",
"Human Activity",
"Realistic Motion",
"Natural Human Motion",
"Human Scene Interactions",
"Static Human Pose Gestures",
"Human Mesh",
"Physically Plausible Human Motion",
"Long Term 3 D Human Motion Conditioning Synthesis",
"Scene Point Clouds",
"Multiple Geometry Constraints",
"Graphics",
"Geometry",
"Computer Vision",
"Three Dimensional Displays",
"Affordances",
"Computational Modeling",
"Cognition"
],
"authors": [
{
"affiliation": "UC San Diego",
"fullName": "Jiashun Wang",
"givenName": "Jiashun",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "UC Berkeley",
"fullName": "Huazhe Xu",
"givenName": "Huazhe",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shanghai Jiao Tong University",
"fullName": "Jingwei Xu",
"givenName": "Jingwei",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NVIDIA",
"fullName": "Sifei Liu",
"givenName": "Sifei",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "UC San Diego",
"fullName": "Xiaolong Wang",
"givenName": "Xiaolong",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-06-01T00:00:00",
"pubType": "proceedings",
"pages": "9396-9406",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4509-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1yeLL3bk34c",
"name": "pcvpr202145090-09578243s1-mm_450900j396.zip",
"size": "8.34 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202145090-09578243s1-mm_450900j396.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "450900j387",
"articleId": "1yeJVSjdsjK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "450900j407",
"articleId": "1yeL7zHjMOI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2011/0394/0/05995448",
"title": "From 3D scene geometry to human workspace",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995448/12OmNCwlajX",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2002/1695/2/169520655",
"title": "Vision-Based 3D Direct Manipulation Interface for Smart Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2002/169520655/12OmNwnYG2x",
"parentPublication": {
"id": "proceedings/icpr/2002/1695/2",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2015/9711/0/5720a217",
"title": "Multi-Shot Deblurring for 3D Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2015/5720a217/12OmNxwENvB",
"parentPublication": {
"id": "proceedings/iccvw/2015/9711/0",
"title": "2015 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2022/0915/0/091500d778",
"title": "Inpaint2Learn: A Self-Supervised Framework for Affordance Learning",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2022/091500d778/1B13QAgvbMI",
"parentPublication": {
"id": "proceedings/wacv/2022/0915/0",
"title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600u0449",
"title": "The Wanderings of Odysseus in 3D Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600u0449/1H0LdqIHLTa",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2022/5670/0/567000a001",
"title": "MoCapDeform: Monocular 3D Human Motion Capture in Deformable Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2022/567000a001/1KYso7Sd0Zy",
"parentPublication": {
"id": "proceedings/3dv/2022/5670/0",
"title": "2022 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600c154",
"title": "Fine-grained Affordance Annotation for Egocentric Hand-Object Interaction Videos",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600c154/1L6LGjMSweI",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600a300",
"title": "Placing Human Animations into 3D Scenes by Learning Interaction- and Geometry-Driven Keyframes",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600a300/1L8qEyMSGu4",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900b778",
"title": "3D AffordanceNet: A Benchmark for Visual Object Affordance Understanding",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900b778/1yeJPnIUeuA",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900a495",
"title": "Affordance Transfer Learning for Human-Object Interaction Detection",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900a495/1yeLTC3KRNe",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
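Editor's note on the record above: the abstract describes enforcing "multiple geometry constraints between the human mesh and scene point clouds via optimization". The paper's actual losses are not included in this record; the sketch below is only a hypothetical nearest-neighbour contact penalty of that general flavour, and the function name, arguments, and threshold are our own inventions.

```python
# Illustrative sketch, NOT the paper's code: a contact penalty between
# sampled human-mesh vertices and a scene point cloud, of the kind one
# could minimise alongside a motion-synthesis objective.
import numpy as np

def scene_contact_penalty(contact_verts, scene_pts, eps=0.02):
    """contact_verts: (V, 3) mesh vertices expected to touch the scene;
    scene_pts: (P, 3) points sampled from the scene surface.
    Returns a scalar that is ~0 when every contact vertex lies within
    eps metres of the scene surface and grows as vertices drift away
    from it (floating above it or sinking deep into it)."""
    # Distance from each vertex to its nearest scene point, shape (V,).
    d = np.linalg.norm(contact_verts[:, None, :] - scene_pts[None, :, :], axis=-1)
    nearest = d.min(axis=1)
    # Hinge at eps so small surface noise is not penalised.
    return np.clip(nearest - eps, 0.0, None).mean()
```

A real system would restrict this to body parts labelled as in contact and would add a separate penetration term; the sketch only shows the general shape of such a constraint.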
{
"proceeding": {
"id": "1zWE36wtuCY",
"title": "2021 International Conference on 3D Vision (3DV)",
"acronym": "3dv",
"groupId": "1800494",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1zWEdQBEcFi",
"doi": "10.1109/3DV53792.2021.00077",
"title": "SAFA: Structure Aware Face Animation",
"normalizedTitle": "SAFA: Structure Aware Face Animation",
"abstract": "Recent success of generative adversarial networks (GAN) has made great progress on the face animation task. However, the complex scene structure of a face image still makes it a challenge to generate videos with face poses significantly deviating from the source image. On one hand, without knowing the facial geometric structure, generated face images might be improperly distorted. On the other hand, some area of the generated image might be occluded in the source image, which makes it difficult for GAN to generate realistic appearance. To address these problems, we propose a structure aware face animation (SAFA) method which constructs specific geometric structures to model different components of a face image. Following the well recognized motion based face animation technique, we use a 3D morphable model (3DMM) to model the face, multiple affine transforms to model the other foreground components like hair and beard, and an identity transform to model the background. The 3DMM geometric embedding not only helps generate realistic structure for the driving scene, but also contributes to better perception of occluded area in the generated image. Besides, we further propose to exploit the widely studied inpainting technique to faithfully recover the occluded image area. Both quantitative and qualitative experiment results have shown the superiority of our method. Code is available at https://github.com/Qiulin-W/SAFA.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Recent success of generative adversarial networks (GAN) has made great progress on the face animation task. However, the complex scene structure of a face image still makes it a challenge to generate videos with face poses significantly deviating from the source image. On one hand, without knowing the facial geometric structure, generated face images might be improperly distorted. On the other hand, some area of the generated image might be occluded in the source image, which makes it difficult for GAN to generate realistic appearance. To address these problems, we propose a structure aware face animation (SAFA) method which constructs specific geometric structures to model different components of a face image. Following the well recognized motion based face animation technique, we use a 3D morphable model (3DMM) to model the face, multiple affine transforms to model the other foreground components like hair and beard, and an identity transform to model the background. The 3DMM geometric embedding not only helps generate realistic structure for the driving scene, but also contributes to better perception of occluded area in the generated image. Besides, we further propose to exploit the widely studied inpainting technique to faithfully recover the occluded image area. Both quantitative and qualitative experiment results have shown the superiority of our method. Code is available at https://github.com/Qiulin-W/SAFA.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Recent success of generative adversarial networks (GAN) has made great progress on the face animation task. However, the complex scene structure of a face image still makes it a challenge to generate videos with face poses significantly deviating from the source image. On one hand, without knowing the facial geometric structure, generated face images might be improperly distorted. On the other hand, some area of the generated image might be occluded in the source image, which makes it difficult for GAN to generate realistic appearance. To address these problems, we propose a structure aware face animation (SAFA) method which constructs specific geometric structures to model different components of a face image. Following the well recognized motion based face animation technique, we use a 3D morphable model (3DMM) to model the face, multiple affine transforms to model the other foreground components like hair and beard, and an identity transform to model the background. The 3DMM geometric embedding not only helps generate realistic structure for the driving scene, but also contributes to better perception of occluded area in the generated image. Besides, we further propose to exploit the widely studied inpainting technique to faithfully recover the occluded image area. Both quantitative and qualitative experiment results have shown the superiority of our method. Code is available at https://github.com/Qiulin-W/SAFA.",
"fno": "268800a679",
"keywords": [
"Computer Animation",
"Face Recognition",
"Image Motion Analysis",
"Image Texture",
"Neural Nets",
"Solid Modelling",
"Stereo Image Processing",
"SAFA",
"Generative Adversarial Networks",
"GAN",
"Face Animation Task",
"Complex Scene Structure",
"Face Image",
"Source Image",
"Facial Geometric Structure",
"Generated Face Images",
"Structure Aware Face Animation Method",
"Specific Geometric Structures",
"3 D Morphable Model",
"3 DMM Geometric Embedding",
"Realistic Structure",
"Occluded Image Area",
"Well Recognized Motion",
"Face Poses",
"Multiple Affine Transforms",
"Foreground Components",
"Https Github Com Qiulin W SAFA",
"Hair",
"Geometry",
"Solid Modeling",
"Three Dimensional Displays",
"Face Recognition",
"Transforms",
"Animation",
"Face Animation",
"3 DMM",
"GAN"
],
"authors": [
{
"affiliation": "JD Technology",
"fullName": "Qiulin Wang",
"givenName": "Qiulin",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "JD Technology",
"fullName": "Lu Zhang",
"givenName": "Lu",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "JD Technology",
"fullName": "Bo Li",
"givenName": "Bo",
"surname": "Li",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-12-01T00:00:00",
"pubType": "proceedings",
"pages": "679-688",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-2688-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "268800a669",
"articleId": "1zWE4BJ9kEo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "268800a689",
"articleId": "1zWEaHHKCkM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/sccc/2010/4400/0/4400a252",
"title": "Generic Face Animation",
"doi": null,
"abstractUrl": "/proceedings-article/sccc/2010/4400a252/12OmNBhpS0Y",
"parentPublication": {
"id": "proceedings/sccc/2010/4400/0",
"title": "2010 XXIX International Conference of the Chilean Computer Science Society",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2010/8420/0/05720335",
"title": "3D Linear Facial Animation Based on Real Data",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2010/05720335/12OmNqGiu2b",
"parentPublication": {
"id": "proceedings/sibgrapi/2010/8420/0",
"title": "2010 23rd SIBGRAPI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/1997/7984/0/79840058",
"title": "Automatic 3D Cloning and Real-Time Animation of a Human Face",
"doi": null,
"abstractUrl": "/proceedings-article/ca/1997/79840058/12OmNxEBzaw",
"parentPublication": {
"id": "proceedings/ca/1997/7984/0",
"title": "Computer Animation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000h346",
"title": "Nonlinear 3D Face Morphable Model",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000h346/17D45Xi9rWU",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2019/1198/0/119800a403",
"title": "Single View 3D Face Reconstruction with Landmark Updating",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2019/119800a403/19wB2vXDV28",
"parentPublication": {
"id": "proceedings/mipr/2019/1198/0",
"title": "2019 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600u0279",
"title": "EMOCA: Emotion Driven Monocular Face Capture and Animation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600u0279/1H0NpdUlHGM",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300k0061",
"title": "Face De-Occlusion Using 3D Morphable Model and Generative Adversarial Network",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300k0061/1hVlK979JGo",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2020/1331/0/09102811",
"title": "Expression-Aware Face Reconstruction Via A Dual-Stream Network",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2020/09102811/1kwr15w4dQQ",
"parentPublication": {
"id": "proceedings/icme/2020/1331/0",
"title": "2020 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2021/0191/0/019100b973",
"title": "DeepFake MNIST+: A DeepFake Facial Animation Dataset",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2021/019100b973/1yNipYT9XSo",
"parentPublication": {
"id": "proceedings/iccvw/2021/0191/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900d660",
"title": "Flow-guided One-shot Talking Face Generation with a High-resolution Audio-visual Dataset",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900d660/1yeJBzQChhK",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
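Editor's note on the SAFA record above: the abstract says the method models the face with a 3DMM, other foreground parts such as hair and beard with multiple affine transforms, and the background with an identity transform. As a loose illustration only (the function name and shapes below are assumptions, not SAFA's API), this sketch blends per-region 2D affine flows into a single dense warp field:

```python
# Minimal sketch, assumed rather than taken from SAFA: compose a dense
# backward-warp field from per-region affine transforms, falling back to
# an identity transform wherever no foreground region claims the pixel.
import numpy as np

def blend_affine_warps(h, w, affines, masks):
    """affines: list of (2, 3) matrices; masks: list of (h, w) soft masks
    whose per-pixel sum is <= 1. Returns an (h, w, 2) warp of (x, y)."""
    ys, xs = np.mgrid[0:h, 0:w].astype(np.float32)
    homog = np.stack([xs, ys, np.ones_like(xs)], axis=-1)   # (h, w, 3)
    identity = np.stack([xs, ys], axis=-1)                  # background warp
    bg_weight = 1.0 - sum(masks)                            # (h, w)
    out = bg_weight[..., None] * identity
    for A, m in zip(affines, masks):
        out += m[..., None] * (homog @ A.T)                 # affine region
    return out
```

In SAFA itself the face region is driven by a 3DMM rather than an affine transform; this sketch demonstrates only the region-blending idea.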
{
"proceeding": {
"id": "12OmNx4gUtV",
"title": "2018 IEEE 38th International Conference on Distributed Computing Systems (ICDCS)",
"acronym": "icdcs",
"groupId": "1000213",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBTJIKk",
"doi": "10.1109/ICDCS.2018.00168",
"title": "Low Latency Edge Rendering Scheme for Interactive 360 Degree Virtual Reality Gaming",
"normalizedTitle": "Low Latency Edge Rendering Scheme for Interactive 360 Degree Virtual Reality Gaming",
"abstract": "This paper describes the core functionality and a proof-of-concept demonstration setup for remote 360 degree stereo virtual reality (VR) gaming. In this end-to-end scheme, the execution of a VR game is off-loaded from an end user device to a cloud edge server in which the executed game is rendered based on user's field of view (FoV) and control actions. Headset and controller feedback is transmitted over the network to the server from which the rendered views of the game are streamed to a user in real-time as encoded HEVC video frames. This approach saves energy and computation load of the end terminals by making use of the latest advancements in network connection speed and quality. In the showcased demonstration, a VR game is run in Unity on a laptop powered by i7 7820HK processor and GTX 1070 GPU. The 360 degree spherical view of the game is rendered and converted to a rectangular frame using equirectangular projection (ERP). The ERP video is sliced vertically and only the FoV is encoded with Kvazaar HEVC encoder in real time and sent over the network in UDP packets. Another laptop is used for playback with a HTC Vive VR headset. Our system can reach an end-to-end latency of 30 ms and bit rate of 20 Mbps for stereo 1080p30 format.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper describes the core functionality and a proof-of-concept demonstration setup for remote 360 degree stereo virtual reality (VR) gaming. In this end-to-end scheme, the execution of a VR game is off-loaded from an end user device to a cloud edge server in which the executed game is rendered based on user's field of view (FoV) and control actions. Headset and controller feedback is transmitted over the network to the server from which the rendered views of the game are streamed to a user in real-time as encoded HEVC video frames. This approach saves energy and computation load of the end terminals by making use of the latest advancements in network connection speed and quality. In the showcased demonstration, a VR game is run in Unity on a laptop powered by i7 7820HK processor and GTX 1070 GPU. The 360 degree spherical view of the game is rendered and converted to a rectangular frame using equirectangular projection (ERP). The ERP video is sliced vertically and only the FoV is encoded with Kvazaar HEVC encoder in real time and sent over the network in UDP packets. Another laptop is used for playback with a HTC Vive VR headset. Our system can reach an end-to-end latency of 30 ms and bit rate of 20 Mbps for stereo 1080p30 format.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper describes the core functionality and a proof-of-concept demonstration setup for remote 360 degree stereo virtual reality (VR) gaming. In this end-to-end scheme, the execution of a VR game is off-loaded from an end user device to a cloud edge server in which the executed game is rendered based on user's field of view (FoV) and control actions. Headset and controller feedback is transmitted over the network to the server from which the rendered views of the game are streamed to a user in real-time as encoded HEVC video frames. This approach saves energy and computation load of the end terminals by making use of the latest advancements in network connection speed and quality. In the showcased demonstration, a VR game is run in Unity on a laptop powered by i7 7820HK processor and GTX 1070 GPU. The 360 degree spherical view of the game is rendered and converted to a rectangular frame using equirectangular projection (ERP). The ERP video is sliced vertically and only the FoV is encoded with Kvazaar HEVC encoder in real time and sent over the network in UDP packets. Another laptop is used for playback with a HTC Vive VR headset. Our system can reach an end-to-end latency of 30 ms and bit rate of 20 Mbps for stereo 1080p30 format.",
"fno": "687101b557",
"keywords": [
"Computer Games",
"Rendering Computer Graphics",
"Stereo Image Processing",
"Three Dimensional Displays",
"Video Coding",
"Video Streaming",
"Virtual Reality",
"Low Latency Edge Rendering Scheme",
"Core Functionality",
"Proof Of Concept Demonstration Setup",
"Remote 360 Degree Stereo Virtual Reality Gaming",
"End To End Scheme",
"VR Game",
"End User Device",
"Cloud Edge Server",
"Fo V",
"Controller Feedback",
"Rendered Views",
"Encoded HEVC Video Frames",
"Network Connection Speed",
"360 Degree Spherical View",
"Kvazaar HEVC Encoder",
"HTC Vive VR Headset",
"Interactive 360 Degree Virtual Reality Gaming",
"Users Field Of View",
"Control Actions",
"Network Connection Quality",
"Equirectangular Projection",
"ERP Video",
"UDP Packets",
"Streaming Media",
"Games",
"Servers",
"Graphics Processing Units",
"Rendering Computer Graphics",
"Image Coding",
"Encoding",
"Virtual Reality VR",
"Edge Computing",
"Video Coding",
"High Efficiency Video Coding HEVC",
"360 Degree Video"
],
"authors": [
{
"affiliation": null,
"fullName": "Marko Viitanen",
"givenName": "Marko",
"surname": "Viitanen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jarno Vanne",
"givenName": "Jarno",
"surname": "Vanne",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Timo D. Hämäläinen",
"givenName": "Timo D.",
"surname": "Hämäläinen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ari Kulmala",
"givenName": "Ari",
"surname": "Kulmala",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icdcs",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1557-1560",
"year": "2018",
"issn": "2575-8411",
"isbn": "978-1-5386-6871-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "687101b553",
"articleId": "12OmNro0HTc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "687101b561",
"articleId": "12OmNzV70sa",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ism/2016/4571/0/4571a107",
"title": "Adaptive 360 VR Video Streaming: Divide and Conquer",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2016/4571a107/12OmNAMtAMS",
"parentPublication": {
"id": "proceedings/ism/2016/4571/0",
"title": "2016 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2014/2871/0/06802042",
"title": "Stereoscopic rendering of virtual environments with wide Field-of-Views up to 360°",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802042/12OmNC943PW",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2013/6097/0/06550203",
"title": "Navigating in virtual environments with 360° omnidirectional rendering",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2013/06550203/12OmNvzJG8K",
"parentPublication": {
"id": "proceedings/3dui/2013/6097/0",
"title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2017/0560/0/08026271",
"title": "Distributed rendering: Interaction delay reduction in remote rendering with client-end GPU-accelerated scene warping technique",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2017/08026271/12OmNwIpNk0",
"parentPublication": {
"id": "proceedings/icmew/2017/0560/0",
"title": "2017 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2014/4717/0/06890685",
"title": "A video encoding speed-up architecture for cloud gaming",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2014/06890685/12OmNyKrHgY",
"parentPublication": {
"id": "proceedings/icmew/2014/4717/0",
"title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2013/5050/0/5050a579",
"title": "A Personal Computer Based 360 Degree Vision Distributed Aperture System",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2013/5050a579/12OmNyoAA7x",
"parentPublication": {
"id": "proceedings/icig/2013/5050/0",
"title": "2013 Seventh International Conference on Image and Graphics (ICIG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2014/4761/0/06890204",
"title": "A novel cloud gaming framework using joint video and graphics streaming",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2014/06890204/12OmNzuZUn1",
"parentPublication": {
"id": "proceedings/icme/2014/4761/0",
"title": "2014 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2019/1198/0/119800a297",
"title": "Very Long Term Field of View Prediction for 360-Degree Video Streaming",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2019/119800a297/19wB5oa2ORi",
"parentPublication": {
"id": "proceedings/mipr/2019/1198/0",
"title": "2019 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ldav/2019/2605/0/08944381",
"title": "Real-Time Compression of Dynamically Generated Images for Offscreen Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ldav/2019/08944381/1grOFDTENry",
"parentPublication": {
"id": "proceedings/ldav/2019/2605/0",
"title": "2019 IEEE 9th Symposium on Large Data Analysis and Visualization (LDAV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2021/4989/0/09455970",
"title": "Enhancing Quality Of Experience For Cloud Virtual Reality Gaming: An Object-Aware Video Encoding",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2021/09455970/1uCgtqwh85O",
"parentPublication": {
"id": "proceedings/icmew/2021/4989/0",
"title": "2021 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
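Editor's note on the record above: the abstract explains that the ERP frame "is sliced vertically and only the FoV is encoded". A toy version of that slice selection (the function name and equal-width slicing scheme are ours, not from the paper) could look like this:

```python
# Back-of-envelope sketch of FoV-driven tile selection for an ERP frame:
# given the viewer's yaw and horizontal FoV, return which of n equal
# vertical slices must be encoded. Purely illustrative.
def fov_slices(yaw_deg, hfov_deg, n_slices):
    width = 360.0 / n_slices
    deg = (yaw_deg - hfov_deg / 2.0) % 360.0  # left edge of the FoV
    covered, picked = 0.0, []
    while covered < hfov_deg:
        picked.append(int(deg // width) % n_slices)
        step = width - (deg % width)          # advance to the next slice edge
        deg = (deg + step) % 360.0
        covered += step
    return sorted(set(picked))

# A 90-degree FoV centred at yaw 350 over twelve 30-degree slices wraps
# around the seam: fov_slices(350, 90, 12) -> [0, 1, 10, 11]
```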
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ0Wb1xK4E",
"doi": "10.1109/VR.2019.8798261",
"title": "Hybrid Projection For Encoding 360 VR Videos",
"normalizedTitle": "Hybrid Projection For Encoding 360 VR Videos",
"abstract": "During the past five years, tons of economic 360 VR cameras (e.g., Ricoh Theta, Samsumg Gear360, LG 360, Insta 360) are sold in the market. While 360 VR videos become ubiquitous very soon, 360 VR video standardization is still under discussion in the digital industry, and more concrete efforts are desired to accelerate its standardization and applications. Though ERP has been widely used for projection and packing layout while encoding 360 VR videos, it has severe projection distortion near poles. In this paper, we introduce a new format for encoding and storing 360 VR videos using hybrid cylindrical projection after thoroughly analyzing the problems with ERP. We show that our new hybrid format can minimize stretching distortion and generate well balanced pixel distribution in the resulting projection.",
"abstracts": [
{
"abstractType": "Regular",
"content": "During the past five years, tons of economic 360 VR cameras (e.g., Ricoh Theta, Samsumg Gear360, LG 360, Insta 360) are sold in the market. While 360 VR videos become ubiquitous very soon, 360 VR video standardization is still under discussion in the digital industry, and more concrete efforts are desired to accelerate its standardization and applications. Though ERP has been widely used for projection and packing layout while encoding 360 VR videos, it has severe projection distortion near poles. In this paper, we introduce a new format for encoding and storing 360 VR videos using hybrid cylindrical projection after thoroughly analyzing the problems with ERP. We show that our new hybrid format can minimize stretching distortion and generate well balanced pixel distribution in the resulting projection.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "During the past five years, tons of economic 360 VR cameras (e.g., Ricoh Theta, Samsumg Gear360, LG 360, Insta 360) are sold in the market. While 360 VR videos become ubiquitous very soon, 360 VR video standardization is still under discussion in the digital industry, and more concrete efforts are desired to accelerate its standardization and applications. Though ERP has been widely used for projection and packing layout while encoding 360 VR videos, it has severe projection distortion near poles. In this paper, we introduce a new format for encoding and storing 360 VR videos using hybrid cylindrical projection after thoroughly analyzing the problems with ERP. We show that our new hybrid format can minimize stretching distortion and generate well balanced pixel distribution in the resulting projection.",
"fno": "08798261",
"keywords": [
"Cameras",
"Video Coding",
"Virtual Reality",
"Hybrid Projection",
"Samsumg Gear 360",
"LG 360",
"Insta 360",
"360 VR Video Standardization",
"Hybrid Cylindrical Projection",
"360 VR Video Encoding",
"Pixel Distribution",
"Videos",
"Layout",
"Distortion",
"Encoding",
"Cameras",
"Standardization",
"Industries",
"Computing Methodologies X 2014 Computer Graphics X 2014 Graphics Systems And Interfaces X 2014 Virtual Reality"
],
"authors": [
{
"affiliation": "School of Computer Science & Software Engineering, East China Normal University, China",
"fullName": "Jintao Tang",
"givenName": "Jintao",
"surname": "Tang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Computer Science & Software Engineering, East China Normal University, China",
"fullName": "Xinyu Zhang",
"givenName": "Xinyu",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "440-447",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08797876",
"articleId": "1cJ0HMTqjOU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08798177",
"articleId": "1cJ13xpYvE4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wacv/2018/4886/0/488601b405",
"title": "Stabilizing First Person 360 Degree Videos",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2018/488601b405/12OmNAWpyow",
"parentPublication": {
"id": "proceedings/wacv/2018/4886/0",
"title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892229",
"title": "6-DOF VR videos with a single 360-camera",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892229/12OmNAlvHtF",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032e753",
"title": "Automatic Content-Aware Projection for 360° Videos",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032e753/12OmNx7G5VC",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446523",
"title": "COP: A New Continuous Packing Layout for 360 VR Videos",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446523/13bd1fKQxs3",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000b420",
"title": "Cube Padding for Weakly-Supervised Saliency Prediction in 360° Videos",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000b420/17D45WB0qcO",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/05/09874253",
"title": "BiFuse++: Self-Supervised and Efficient Bi-Projection Fusion for 360° Depth Estimation",
"doi": null,
"abstractUrl": "/journal/tp/2023/05/09874253/1Gjwzjh5yhi",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10053631",
"title": "Introducing 3D Thumbnails to Access 360-Degree Videos in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10053631/1L1HXLrXmqA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2020/7463/0/746300a371",
"title": "Annotation Tool for Precise Emotion Ground Truth Label Acquisition while Watching 360° VR Videos",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2020/746300a371/1qpzCZXhpS0",
"parentPublication": {
"id": "proceedings/aivr/2020/7463/0",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2021/04/09384236",
"title": "The Potential of 360° Virtual Reality Videos and Real VR for Education—A Literature Review",
"doi": null,
"abstractUrl": "/magazine/cg/2021/04/09384236/1scDA5NYISI",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412035",
"title": "Revisiting Optical Flow Estimation in 360 Videos",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412035/1tmi3jCoDL2",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
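Editor's note on the record above: the abstract's complaint that ERP "has severe projection distortion near poles" has a simple quantitative form. Every ERP pixel row spans the same latitude interval, but the sphere area a row represents shrinks with the cosine of its latitude. A quick numeric check (ours, not the paper's):

```python
# Relative sphere area represented by one ERP pixel row (row 0 = top).
import math

def erp_row_weight(row, height):
    lat = math.pi * (0.5 - (row + 0.5) / height)  # latitude in radians
    return math.cos(lat)

h = 1080
print(erp_row_weight(h // 2, h))  # ~1.0 at the equator
print(erp_row_weight(0, h))       # ~0.0015 at the pole: ~700x oversampled
```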
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ1b26beEg",
"doi": "10.1109/VR.2019.8797971",
"title": "360-Degree Photo-realistic VR Conferencing",
"normalizedTitle": "360-Degree Photo-realistic VR Conferencing",
"abstract": "VR experiences are becoming more social, but many social VR systems represent users as artificial avatars. For use cases such as VR conferencing, photo-realistic representations may be preferred. In this paper, we present ongoing research into social VR experiences with photo-realistic representations of participants and present a web-based social VR framework that extends current video conferencing capabilities with new VR functionalities. We explain the underlying design concepts of our framework and discuss user studies to evaluate the framework in three different scenarios. We show that people are able to use VR communication in real meeting situations and outline our future research to better understand the actual benefits and limitations of our approach, to fully understand the technological gaps that need to be bridged and to better understand the user experience.",
"abstracts": [
{
"abstractType": "Regular",
"content": "VR experiences are becoming more social, but many social VR systems represent users as artificial avatars. For use cases such as VR conferencing, photo-realistic representations may be preferred. In this paper, we present ongoing research into social VR experiences with photo-realistic representations of participants and present a web-based social VR framework that extends current video conferencing capabilities with new VR functionalities. We explain the underlying design concepts of our framework and discuss user studies to evaluate the framework in three different scenarios. We show that people are able to use VR communication in real meeting situations and outline our future research to better understand the actual benefits and limitations of our approach, to fully understand the technological gaps that need to be bridged and to better understand the user experience.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "VR experiences are becoming more social, but many social VR systems represent users as artificial avatars. For use cases such as VR conferencing, photo-realistic representations may be preferred. In this paper, we present ongoing research into social VR experiences with photo-realistic representations of participants and present a web-based social VR framework that extends current video conferencing capabilities with new VR functionalities. We explain the underlying design concepts of our framework and discuss user studies to evaluate the framework in three different scenarios. We show that people are able to use VR communication in real meeting situations and outline our future research to better understand the actual benefits and limitations of our approach, to fully understand the technological gaps that need to be bridged and to better understand the user experience.",
"fno": "08797971",
"keywords": [
"Avatars",
"Internet",
"Teleconferencing",
"User Experience",
"Web Based Social VR Framework",
"VR Communication",
"User Experience",
"360 Degree Photo Realistic VR Conferencing",
"Artificial Avatars",
"Video Conference",
"Cameras",
"Three Dimensional Displays",
"User Experience",
"Resists",
"Face",
"Avatars",
"Virtual Reality",
"VR",
"Social VR",
"VR Conferencing",
"Web RTC",
"Web VR",
"Immersive Virtual Environments",
"Information Systems X 2014 World Wide Web X 2014 Web Conferencing",
"Information Systems X 2014 Multimedia Information Systems X 2014 Multimedia Streaming"
],
"authors": [
{
"affiliation": "Netherlands Organisation for applied scientific research (TNO)",
"fullName": "Simon N.B Gunkel",
"givenName": "Simon N.B",
"surname": "Gunkel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Netherlands Organisation for applied scientific research (TNO)",
"fullName": "Marleen D.W. Dohmen",
"givenName": "Marleen D.W.",
"surname": "Dohmen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Netherlands Organisation for applied scientific research (TNO)",
"fullName": "Hans Stokking",
"givenName": "Hans",
"surname": "Stokking",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Netherlands Organisation for applied scientific research (TNO)",
"fullName": "Omar Niamut",
"givenName": "Omar",
"surname": "Niamut",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "946-947",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08797895",
"articleId": "1cJ0OiUioec",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08797972",
"articleId": "1cJ0V5mcpB6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ozchi/1998/9206/0/92060004",
"title": "Designing Effective Navigation for Photo-Realistic VR Environments.",
"doi": null,
"abstractUrl": "/proceedings-article/ozchi/1998/92060004/12OmNC0y5DC",
"parentPublication": {
"id": "proceedings/ozchi/1998/9206/0",
"title": "Computer-Human Interaction, Australasian Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892245",
"title": "Recognition and mapping of facial expressions to avatar by embedded photo reflective sensors in head mounted display",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892245/12OmNwkR5tU",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892377",
"title": "WebVR meets WebRTC: Towards 360-degree social VR experiences",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892377/12OmNznkJU6",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699261",
"title": "Visually Induced Motion Sickness in 360° Videos: Comparing and Combining Visual Optimization Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699261/19F1U8eRyMw",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600u0291",
"title": "Robust Egocentric Photo-realistic Facial Expression Transfer for Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600u0291/1H1iHNqtaes",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a491",
"title": "Implementation of Attention-Based Spatial Audio for 360° Environments",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a491/1J7Wlf9IrNC",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10053631",
"title": "Introducing 3D Thumbnails to Access 360-Degree Videos in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10053631/1L1HXLrXmqA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2018/8497/0/849700a106",
"title": "User Profile Analysis for Enhancing QoE of 360 Panoramic Video in Virtual Reality Environment",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2018/849700a106/1a3x6l4ZCI8",
"parentPublication": {
"id": "proceedings/icvrv/2018/8497/0",
"title": "2018 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798264",
"title": "Watching Videos Together in Social Virtual Reality: An Experimental Study on User's QoE",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798264/1cJ107nkKk0",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a540",
"title": "A Preliminary Investigation of Avatar Use in Video-Conferencing",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a540/1tnWNeJYJAA",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jIxhEnA8IE",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxuOtbTAQ",
"doi": "10.1109/VRW50115.2020.00195",
"title": "Exploring Effect Of Different External Stimuli On Body Association In VR",
"normalizedTitle": "Exploring Effect Of Different External Stimuli On Body Association In VR",
"abstract": "Body association in VR is the extent to which users perceive a virtual body as their own. Prior research has studied the effect of tactile, visual and visuomotor stimuli on body association. Additionally, studies have been conducted to test the effect of olfactory stimuli on immersion, but how it affects body association hasn’t been explored. Through a systematic study, we compare the effect of tactile, visual, visuomotor and olfactory stimuli on body association in VR. This work paves the way towards understanding olfactory sensations and how they might affect experiences in VR.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Body association in VR is the extent to which users perceive a virtual body as their own. Prior research has studied the effect of tactile, visual and visuomotor stimuli on body association. Additionally, studies have been conducted to test the effect of olfactory stimuli on immersion, but how it affects body association hasn’t been explored. Through a systematic study, we compare the effect of tactile, visual, visuomotor and olfactory stimuli on body association in VR. This work paves the way towards understanding olfactory sensations and how they might affect experiences in VR.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Body association in VR is the extent to which users perceive a virtual body as their own. Prior research has studied the effect of tactile, visual and visuomotor stimuli on body association. Additionally, studies have been conducted to test the effect of olfactory stimuli on immersion, but how it affects body association hasn’t been explored. Through a systematic study, we compare the effect of tactile, visual, visuomotor and olfactory stimuli on body association in VR. This work paves the way towards understanding olfactory sensations and how they might affect experiences in VR.",
"fno": "09090609",
"keywords": [
"Olfactory",
"Visualization",
"Virtual Environments",
"Systematics",
"Mirrors",
"Conferences",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Paradigms",
"Virtual Reality",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Empirical Studies In HCI"
],
"authors": [
{
"affiliation": "Indian Institute of Technology,Bombay",
"fullName": "Prabodh Sakhardande",
"givenName": "Prabodh",
"surname": "Sakhardande",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Indian Institute of Technology,Bombay",
"fullName": "Amarnath Murugan",
"givenName": "Amarnath",
"surname": "Murugan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Indian Institute of Technology,Bombay",
"fullName": "Jayesh S.Pillai",
"givenName": "Jayesh",
"surname": "S.Pillai",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "688-689",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6532-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09090606",
"articleId": "1jIxlPij9Je",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09090522",
"articleId": "1jIxpZciOZy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2015/1727/0/07223383",
"title": "The effects of olfaction on training transfer for an assembly task",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223383/12OmNvEhg3O",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446279",
"title": "The Effects of Olfactory Stimulation and Active Participation on Food Cravings in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446279/13bd1eSlyu2",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446569",
"title": "Spatial Asynchronous Visuo-Tactile Stimuli Influence Ownership of Virtual Wings",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446569/13bd1ftOBCI",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08642346",
"title": "Audio-Visual-Olfactory Resource Allocation for Tri-modal Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08642346/17PYElfKq78",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a474",
"title": "Simulating Olfactory Cocktail Party Effect in VR: A Multi-odor Display Approach Based on Attention",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a474/1CJbU8KWWTS",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049717",
"title": "Eating, Smelling, and Seeing: Investigating Multisensory Integration and (In)congruent Stimuli while Eating in VR",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049717/1KYostbG9gY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/11/08756096",
"title": "Impact of Different Sensory Stimuli on Presence in Credible Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/2020/11/08756096/1bpYGVRBVYc",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089617",
"title": "Virtual environment with smell using wearable olfactory display and computational fluid dynamics simulation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089617/1jIxfcDz7Ak",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/02/09143472",
"title": "Do Multisensory Stimuli Benefit the Virtual Reality Experience? A Systematic Review",
"doi": null,
"abstractUrl": "/journal/tg/2022/02/09143472/1lxmwwX05lC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a279",
"title": "Does Virtual Odor Representation Influence the Perception of Olfactory Intensity and Directionality in VR?",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a279/1tuAlZRpf6E",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tnWwqMuCzu",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tnWN2HrHuU",
"doi": "10.1109/VRW52623.2021.00251",
"title": "[DC] \"SHOW YOUR DEDICATION:\" VR Games and Outmersion",
"normalizedTitle": "[DC] \"SHOW YOUR DEDICATION:\" VR Games and Outmersion",
"abstract": "Videogame scholarship has noted the unhelpful vagueness of `immersion' as a descriptor, yet in the discourse surrounding VR entertainment, `immersion' remains a central focus, and is lauded by users as the primary goal of VR experiences. Building on the legacy of games scholarship that complicates `immersion' [5], [10] this study uses close-play to examine several emergent tropes in VR experiences that rely on outmersive design: VR suicide, bodily alteration of the avatar, and VR-within-VR. Ultimately, I advocate for increased attention to how VR's technical capacity for user immersion differs from the reality of many VR experiences that use outmersive design.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Videogame scholarship has noted the unhelpful vagueness of `immersion' as a descriptor, yet in the discourse surrounding VR entertainment, `immersion' remains a central focus, and is lauded by users as the primary goal of VR experiences. Building on the legacy of games scholarship that complicates `immersion' [5], [10] this study uses close-play to examine several emergent tropes in VR experiences that rely on outmersive design: VR suicide, bodily alteration of the avatar, and VR-within-VR. Ultimately, I advocate for increased attention to how VR's technical capacity for user immersion differs from the reality of many VR experiences that use outmersive design.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Videogame scholarship has noted the unhelpful vagueness of `immersion' as a descriptor, yet in the discourse surrounding VR entertainment, `immersion' remains a central focus, and is lauded by users as the primary goal of VR experiences. Building on the legacy of games scholarship that complicates `immersion' [5], [10] this study uses close-play to examine several emergent tropes in VR experiences that rely on outmersive design: VR suicide, bodily alteration of the avatar, and VR-within-VR. Ultimately, I advocate for increased attention to how VR's technical capacity for user immersion differs from the reality of many VR experiences that use outmersive design.",
"fno": "405700a737",
"keywords": [
"Avatars",
"Computer Games",
"Entertainment",
"User Experience",
"Virtual Reality",
"User Immersion",
"VR Experiences",
"Outmersive Design",
"Videogame Scholarship",
"VR Entertainment",
"Games Scholarship",
"VR Suicide",
"VR Within VR",
"VR Technical Capacity",
"VR Games",
"Avatar",
"Three Dimensional Displays",
"Scholarships",
"Conferences",
"Avatars",
"Buildings",
"Entertainment Industry",
"Games",
"Software And Its Engineering",
"Contextual Software Domains",
"Virtual Worlds Software",
"Interactive Games",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Paradigms",
"Virtual Reality"
],
"authors": [
{
"affiliation": "University of Central Florida",
"fullName": "PS Berge",
"givenName": "PS",
"surname": "Berge",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "737-738",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4057-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "405700a735",
"articleId": "1tnXDI2lhHq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "405700a739",
"articleId": "1tnWQJT7eWA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wevr/2017/3881/0/07957714",
"title": "When sound modulates vision: VR applications for art and entertainment",
"doi": null,
"abstractUrl": "/proceedings-article/wevr/2017/07957714/12OmNBvkdnd",
"parentPublication": {
"id": "proceedings/wevr/2017/3881/0",
"title": "2017 IEEE 3rd Workshop on Everyday Virtual Reality (WEVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892258",
"title": "All are welcome: Using VR ethnography to explore harassment behavior in immersive social virtual reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892258/12OmNx0A7Fw",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mlbdbi/2021/1790/0/179000a553",
"title": "Design and Application of Rehabilitation Psychology Practical Teaching System Based on VR Technology",
"doi": null,
"abstractUrl": "/proceedings-article/mlbdbi/2021/179000a553/1BQipkY1aGQ",
"parentPublication": {
"id": "proceedings/mlbdbi/2021/1790/0",
"title": "2021 3rd International Conference on Machine Learning, Big Data and Business Intelligence (MLBDBI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a956",
"title": "[DC]Using Multimodal Input in Augmented Virtual Teleportation",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a956/1CJcYgs1MY0",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrhciai/2022/9182/0/918200a134",
"title": "MIND-VR: A Utility Approach of Human-Computer Interaction in Virtual Space based on Autonomous Consciousness",
"doi": null,
"abstractUrl": "/proceedings-article/vrhciai/2022/918200a134/1LxffWquCrK",
"parentPublication": {
"id": "proceedings/vrhciai/2022/9182/0",
"title": "2022 International Conference on Virtual Reality, Human-Computer Interaction and Artificial Intelligence (VRHCIAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798301",
"title": "Keynote Speaker: Let's Unleash Entertainment! VR Possibilities Learned through Entertainment Facility “VR Zone”",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798301/1cJ1fjVwy4g",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2019/4540/0/08864579",
"title": "Exergaming in VR: The Impact of Immersive Embodiment on Motivation, Performance, and Perceived Exertion",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2019/08864579/1e5ZqUGv6Cc",
"parentPublication": {
"id": "proceedings/vs-games/2019/4540/0",
"title": "2019 11th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090421",
"title": "Analysis of Interaction Spaces for VR in Public Transport Systems",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090421/1jIxr9dj52o",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a271",
"title": "Social Virtual Reality: Ethical Considerations and Future Directions for An Emerging Research Space",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a271/1tnXmA2qUlW",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2020/0497/0/049700a358",
"title": "Integrated Application of BIM and VR Technology in Architectural Interactive Design and Construction",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2020/049700a358/1vg7Xg6sLLy",
"parentPublication": {
"id": "proceedings/icvrv/2020/0497/0",
"title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tnWwqMuCzu",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tnXZIKSGAM",
"doi": "10.1109/VRW52623.2021.00058",
"title": "An Interface for Enhanced Teacher Awareness of Student Actions and Attention in a VR Classroom",
"normalizedTitle": "An Interface for Enhanced Teacher Awareness of Student Actions and Attention in a VR Classroom",
"abstract": "Networked VR is gaining recognition as a way to provide remote presentations or classes when in-person meetings are difficult or risky to conduct. However, the tools do not provide as many cues about audience actions and attention as in-person meetings, for example, subtle face and body motion cues are missing. Furthermore, the field of view and visual detail are reduced, and there are added problems such as motion sickness, network disconnections, and relatively unrestricted avatar positioning. To help teachers understand and manage students in such an environment, we designed an interface to support teacher awareness of students and their actions, attention, and temperament in a social VR environment. This paper focuses on how different visual cues are integrated into an immersive VR interface that keeps relevant information about students within the teacher's visual field of attention. Cues include floating indicators, centrally-arranged face icons with gaze information, tethers and other indicators of avatar location, and options to reduce the amount of presented information. We include a pilot study of user preferences for different cue types and their parameters (such as indicator style and placement with respect to the teacher).",
"abstracts": [
{
"abstractType": "Regular",
"content": "Networked VR is gaining recognition as a way to provide remote presentations or classes when in-person meetings are difficult or risky to conduct. However, the tools do not provide as many cues about audience actions and attention as in-person meetings, for example, subtle face and body motion cues are missing. Furthermore, the field of view and visual detail are reduced, and there are added problems such as motion sickness, network disconnections, and relatively unrestricted avatar positioning. To help teachers understand and manage students in such an environment, we designed an interface to support teacher awareness of students and their actions, attention, and temperament in a social VR environment. This paper focuses on how different visual cues are integrated into an immersive VR interface that keeps relevant information about students within the teacher's visual field of attention. Cues include floating indicators, centrally-arranged face icons with gaze information, tethers and other indicators of avatar location, and options to reduce the amount of presented information. We include a pilot study of user preferences for different cue types and their parameters (such as indicator style and placement with respect to the teacher).",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Networked VR is gaining recognition as a way to provide remote presentations or classes when in-person meetings are difficult or risky to conduct. However, the tools do not provide as many cues about audience actions and attention as in-person meetings, for example, subtle face and body motion cues are missing. Furthermore, the field of view and visual detail are reduced, and there are added problems such as motion sickness, network disconnections, and relatively unrestricted avatar positioning. To help teachers understand and manage students in such an environment, we designed an interface to support teacher awareness of students and their actions, attention, and temperament in a social VR environment. This paper focuses on how different visual cues are integrated into an immersive VR interface that keeps relevant information about students within the teacher's visual field of attention. Cues include floating indicators, centrally-arranged face icons with gaze information, tethers and other indicators of avatar location, and options to reduce the amount of presented information. We include a pilot study of user preferences for different cue types and their parameters (such as indicator style and placement with respect to the teacher).",
"fno": "405700a284",
"keywords": [
"Avatars",
"Computer Aided Instruction",
"Human Computer Interaction",
"Teaching",
"Network Disconnections",
"Relatively Unrestricted Avatar Positioning",
"Social VR Environment",
"Visual Cues",
"Immersive VR Interface",
"Centrally Arranged Face Icons",
"Gaze Information",
"Avatar Location",
"Cue Types",
"Enhanced Teacher Awareness",
"Student Actions",
"VR Classroom",
"Networked VR",
"Remote Presentations",
"In Person Meetings",
"Audience Actions",
"Body Motion Cues",
"Motion Sickness",
"Teacher Visual Field Of Attention",
"User Preferences",
"Visualization",
"Three Dimensional Displays",
"Avatars",
"Face Recognition",
"Conferences",
"Education",
"User Interfaces",
"Computing Methodologies",
"Computer Graphics",
"Graphics Systems And Interfaces",
"Virtual Reality",
"Human Centered Computing",
"Visualization",
"Visualization Design And Evaluation Methods",
"Applied Computing",
"Education",
"Distance Learning"
],
"authors": [
{
"affiliation": "University of Louisiana at Lafayette,Lafayette,Louisiana,United States",
"fullName": "David M Broussard",
"givenName": "David M",
"surname": "Broussard",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Louisiana at Lafayette,Lafayette,Louisiana,United States",
"fullName": "Yitoshee Rahman",
"givenName": "Yitoshee",
"surname": "Rahman",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Louisiana at Lafayette,Lafayette,Louisiana,United States",
"fullName": "Arun K Kulshreshth",
"givenName": "Arun K",
"surname": "Kulshreshth",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Louisiana at Lafayette,Lafayette,Louisiana,United States",
"fullName": "Christoph W Borst",
"givenName": "Christoph W",
"surname": "Borst",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "284-290",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4057-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1tnXZeQtfPi",
"name": "pvrw202140570-09419173s1-mm_405700a284.zip",
"size": "82.6 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvrw202140570-09419173s1-mm_405700a284.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "405700a278",
"articleId": "1tnXjaZXiw0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "405700a291",
"articleId": "1tnX87q8ACY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2018/3365/0/08446312",
"title": "VR-Assisted vs Video-Assisted Teacher Training",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446312/13bd1eY1x42",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a818",
"title": "Towards Controlling Whole Body Avatars with Partial Body-Tracking and Environmental Information",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a818/1CJeftFqI5W",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09874383",
"title": "Multi-sensory display of self-avatar's physiological state: virtual breathing and heart beating can increase sensation of effort in VR",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09874383/1GjwO1LML60",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a613",
"title": "Evaluating Modifying Teacher Avatar Clip Sequencing Based on Eye-Tracked Visual Attention in Educational VR",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a613/1J7WepoS2w8",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049707",
"title": "Inward VR: Toward a Qualitative Method for Investigating Interoceptive Awareness in VR",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049707/1KYoumHTB72",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798318",
"title": "Evaluating Teacher Avatar Appearances in Educational VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798318/1cJ0P8vBhhm",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a064",
"title": "A Short Description of an Ankle-Actuated Seated VR Locomotion Interface",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a064/1tnXf67lAWs",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a502",
"title": "Visual Indicators for Monitoring Students in a VR class",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a502/1tnXkpvZfqg",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a788",
"title": "Revisiting Distance Perception with Scaled Embodied Cues in Social Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a788/1tuAHZj29Q4",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a326",
"title": "The Impact of Avatar Appearance, Perspective and Context on Gait Variability and User Experience in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a326/1tuBfC55nck",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1yeCSUXkdhu",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yeD43bfMc0",
"doi": "10.1109/ISMAR52148.2021.00047",
"title": "Diegetic Representations for Seamless Cross-Reality Interruptions",
"normalizedTitle": "Diegetic Representations for Seamless Cross-Reality Interruptions",
"abstract": "The closed design of virtual reality (VR) head-mounted displays substantially limits users’ awareness of their real-world surroundings. This presents challenges when another person in the same physical space needs to interrupt the VR user for a brief conversation. Such interruptions, e.g., tapping a VR user on the shoulder, can cause a disruptive break in presence (BIP), which affects their place and plausibility illusions, and may cause a drop in performance of their virtual activity. Recent findings related to the concept of diegesis, which denotes the internal consistency of an experience/story, suggest potential benefits of integrating registered virtual representations for physical interactors, especially when these appear internally consistent in VR. In this paper, we present a human-subject study we conducted to compare and evaluate five different diegetic and non-diegetic methods to facilitate cross-reality interruptions in a virtual office environment, where a user’s task was briefly interrupted by a physical person. We created a Cross-Reality Interaction Questionnaire (CRIQ) to capture the quality of the interaction from the VR user’s perspective. Our results show that the diegetic representations afforded reasonably high senses of co-presence, the highest quality interactions, the highest place illusions, and caused the least disruption of the participants’ virtual experiences. We discuss our findings as well as implications for practical applications that aim to leverage virtual representations to ease cross-reality interruptions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The closed design of virtual reality (VR) head-mounted displays substantially limits users’ awareness of their real-world surroundings. This presents challenges when another person in the same physical space needs to interrupt the VR user for a brief conversation. Such interruptions, e.g., tapping a VR user on the shoulder, can cause a disruptive break in presence (BIP), which affects their place and plausibility illusions, and may cause a drop in performance of their virtual activity. Recent findings related to the concept of diegesis, which denotes the internal consistency of an experience/story, suggest potential benefits of integrating registered virtual representations for physical interactors, especially when these appear internally consistent in VR. In this paper, we present a human-subject study we conducted to compare and evaluate five different diegetic and non-diegetic methods to facilitate cross-reality interruptions in a virtual office environment, where a user’s task was briefly interrupted by a physical person. We created a Cross-Reality Interaction Questionnaire (CRIQ) to capture the quality of the interaction from the VR user’s perspective. Our results show that the diegetic representations afforded reasonably high senses of co-presence, the highest quality interactions, the highest place illusions, and caused the least disruption of the participants’ virtual experiences. We discuss our findings as well as implications for practical applications that aim to leverage virtual representations to ease cross-reality interruptions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The closed design of virtual reality (VR) head-mounted displays substantially limits users’ awareness of their real-world surroundings. This presents challenges when another person in the same physical space needs to interrupt the VR user for a brief conversation. Such interruptions, e.g., tapping a VR user on the shoulder, can cause a disruptive break in presence (BIP), which affects their place and plausibility illusions, and may cause a drop in performance of their virtual activity. Recent findings related to the concept of diegesis, which denotes the internal consistency of an experience/story, suggest potential benefits of integrating registered virtual representations for physical interactors, especially when these appear internally consistent in VR. In this paper, we present a human-subject study we conducted to compare and evaluate five different diegetic and non-diegetic methods to facilitate cross-reality interruptions in a virtual office environment, where a user’s task was briefly interrupted by a physical person. We created a Cross-Reality Interaction Questionnaire (CRIQ) to capture the quality of the interaction from the VR user’s perspective. Our results show that the diegetic representations afforded reasonably high senses of co-presence, the highest quality interactions, the highest place illusions, and caused the least disruption of the participants’ virtual experiences. We discuss our findings as well as implications for practical applications that aim to leverage virtual representations to ease cross-reality interruptions.",
"fno": "015800a310",
"keywords": [
"Helmet Mounted Displays",
"Human Computer Interaction",
"Human Factors",
"User Experience",
"Virtual Reality",
"Diegetic Representations",
"Virtual Reality Head Mounted Displays",
"Real World Surroundings",
"Virtual Activity",
"Virtual Representations",
"Seamless Cross Reality Interruptions",
"VR User Perspective",
"Cross Reality Interaction Questionnaire",
"Head Mounted Displays",
"Avatars",
"Design Methodology",
"Teleworking",
"User Experience",
"Task Analysis",
"Augmented Reality",
"Virtual Reality",
"Cross Reality",
"Diegesis",
"Interruptions",
"Human Centered Computing",
"Human Computer Interaction HCI",
"HCI Design And Evaluation Methods",
"User Studies",
"Interaction Paradigms",
"Virtual Reality"
],
"authors": [
{
"affiliation": "University of Central Florida,SREAL",
"fullName": "Matt Gottsacker",
"givenName": "Matt",
"surname": "Gottsacker",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Central Florida,SREAL",
"fullName": "Nahal Norouzi",
"givenName": "Nahal",
"surname": "Norouzi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Central Florida University of Calgary,SREAL",
"fullName": "Kangsoo Kim",
"givenName": "Kangsoo",
"surname": "Kim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Central Florida,SREAL",
"fullName": "Gerd Bruder",
"givenName": "Gerd",
"surname": "Bruder",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Central Florida,SREAL",
"fullName": "Greg Welch",
"givenName": "Greg",
"surname": "Welch",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "310-319",
"year": "2021",
"issn": "1554-7868",
"isbn": "978-1-6654-0158-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "015800a304",
"articleId": "1yeD5HpGAIE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "015800a320",
"articleId": "1yeDb2JxLPy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "trans/tg/2018/04/08260856",
"title": "NotifiVR: Exploring Interruptions and Notifications in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08260856/13rRUxNmPDW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2022/06/09984052",
"title": "BoidVR: An Agent Simulation Environment Based on Freehand and Virtual Reality",
"doi": null,
"abstractUrl": "/magazine/cg/2022/06/09984052/1J4y8IsgDM4",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a827",
"title": "Exploring Cues and Signaling to Improve Cross-Reality Interruptions",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a827/1J7Ww0jSuxa",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a074",
"title": "An Exploration of Hands-free Text Selection for Virtual Reality Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a074/1JrRaeV82L6",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a131",
"title": "Evaluation of Text Selection Techniques in Virtual Reality Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a131/1JrRdnGe43C",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090490",
"title": "Evaluation of Simulator Sickness for 360° Videos on an HMD Subject to Participants’ Experience with Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090490/1jIxwgIdgsw",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a600",
"title": "Automatic Generation of Diegetic Guidance in Cinematic Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a600/1pysw9jL61i",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a344",
"title": "Exploration of Hands-free Text Entry Techniques For Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a344/1pysyrYBX5C",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a345",
"title": "Spherical World in Miniature: Exploring the Tiny Planets Metaphor for Discrete Locomotion in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a345/1tuAuPBgHTi",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a118",
"title": "Exploring Head-based Mode-Switching in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a118/1yeD1RhEseY",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyYm2vR",
"title": "Multimedia Computing and Systems, International Conference on",
"acronym": "icmcs",
"groupId": "1000479",
"volume": "1",
"displayVolume": "1",
"year": "1999",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNC3Xhk4",
"doi": "10.1109/MMCS.1999.779195",
"title": "Real-Time Composition of Stereo Images for Video See-Through Augmented Reality",
"normalizedTitle": "Real-Time Composition of Stereo Images for Video See-Through Augmented Reality",
"abstract": "This paper describes a method of stereo image composition for video see-through augmented reality. In order to implement an augmented reality system, we must acquire the position and orientation of the user's viewpoint to display the composed image maintaining correct registration of real and virtual worlds. All the procedures must be done in real-time. We have built a prototype augmented reality system that adopts the combination of a vision-based tracking technique and a video see-through head mounted display (HMD). Display-timing is synchronized between the real and virtual environments, so that an alignment error is reduced. The system calculates camera parameters from three markers among which physical relationships are unknown in image sequences captured by a pair of stereo cameras mounted on the HMD. In addition, the user's hands are regarded as real-world objects that may occlude virtual objects; the system estimates the depth of hands in images and generates a composed image maintaining consistent occlusions between the hands and virtual objects.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper describes a method of stereo image composition for video see-through augmented reality. In order to implement an augmented reality system, we must acquire the position and orientation of the user's viewpoint to display the composed image maintaining correct registration of real and virtual worlds. All the procedures must be done in real-time. We have built a prototype augmented reality system that adopts the combination of a vision-based tracking technique and a video see-through head mounted display (HMD). Display-timing is synchronized between the real and virtual environments, so that an alignment error is reduced. The system calculates camera parameters from three markers among which physical relationships are unknown in image sequences captured by a pair of stereo cameras mounted on the HMD. In addition, the user's hands are regarded as real-world objects that may occlude virtual objects; the system estimates the depth of hands in images and generates a composed image maintaining consistent occlusions between the hands and virtual objects.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper describes a method of stereo image composition for video see-through augmented reality. In order to implement an augmented reality system, we must acquire the position and orientation of the user's viewpoint to display the composed image maintaining correct registration of real and virtual worlds. All the procedures must be done in real-time. We have built a prototype augmented reality system that adopts the combination of a vision-based tracking technique and a video see-through head mounted display (HMD). Display-timing is synchronized between the real and virtual environments, so that an alignment error is reduced. The system calculates camera parameters from three markers among which physical relationships are unknown in image sequences captured by a pair of stereo cameras mounted on the HMD. In addition, the user's hands are regarded as real-world objects that may occlude virtual objects; the system estimates the depth of hands in images and generates a composed image maintaining consistent occlusions between the hands and virtual objects.",
"fno": "02539213",
"keywords": [
"Augmented Reality",
"Stereo Images",
"Occlusion Detection"
],
"authors": [
{
"affiliation": "Nara Institute of Science and Technology",
"fullName": "Masayuki Kanbara",
"givenName": "Masayuki",
"surname": "Kanbara",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nara Institute of Science and Technology",
"fullName": "Takashi Okuma",
"givenName": "Takashi",
"surname": "Okuma",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nara Institute of Science and Technology",
"fullName": "Haruo Takemura",
"givenName": "Haruo",
"surname": "Takemura",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nara Institute of Science and Technology",
"fullName": "Naokazu Yokoya",
"givenName": "Naokazu",
"surname": "Yokoya",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmcs",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1999-06-01T00:00:00",
"pubType": "proceedings",
"pages": "9213",
"year": "1999",
"issn": "1530-2032",
"isbn": "0-7695-0253-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "02539207",
"articleId": "12OmNqJHFA9",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "02539220",
"articleId": "12OmNxTEiQK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNqMPfSj",
"title": "2007 IEEE Symposium on 3D User Interfaces",
"acronym": "3dui",
"groupId": "1001623",
"volume": "0",
"displayVolume": "0",
"year": "2007",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvrMUfz",
"doi": "10.1109/3DUI.2007.340779",
"title": "Usability of Hybrid, Physical and Virtual Objects for Basic Manipulation Tasks in Virtual Environments",
"normalizedTitle": "Usability of Hybrid, Physical and Virtual Objects for Basic Manipulation Tasks in Virtual Environments",
"abstract": "Integrating physical and virtual environments has been shown to improve usability of virtual reality (VR) applications. Objects within these mixed realities (MR (Milgram and Kishino, 1994)) can be hybrid physical/virtual objects that are physically manipulatable and have flexible shape and texture. We compare usability of hybrid objects for basic manipulation tasks (rotation, positioning) to physical and virtual objects. The results suggest that hybrid objects are manipulated faster than virtual objects, but not more accurately. Physical objects outperform both hybrid and virtual objects in terms of speed and accuracy. On the other hand, users felt most stimulated by the virtual objects, followed by the hybrid and physical objects. The study shows that hybrid objects \"work\" in virtual environments, but further investigations regarding the factors influencing their usability are needed.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Integrating physical and virtual environments has been shown to improve usability of virtual reality (VR) applications. Objects within these mixed realities (MR (Milgram and Kishino, 1994)) can be hybrid physical/virtual objects that are physically manipulatable and have flexible shape and texture. We compare usability of hybrid objects for basic manipulation tasks (rotation, positioning) to physical and virtual objects. The results suggest that hybrid objects are manipulated faster than virtual objects, but not more accurately. Physical objects outperform both hybrid and virtual objects in terms of speed and accuracy. On the other hand, users felt most stimulated by the virtual objects, followed by the hybrid and physical objects. The study shows that hybrid objects \"work\" in virtual environments, but further investigations regarding the factors influencing their usability are needed.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Integrating physical and virtual environments has been shown to improve usability of virtual reality (VR) applications. Objects within these mixed realities (MR (Milgram and Kishino, 1994)) can be hybrid physical/virtual objects that are physically manipulatable and have flexible shape and texture. We compare usability of hybrid objects for basic manipulation tasks (rotation, positioning) to physical and virtual objects. The results suggest that hybrid objects are manipulated faster than virtual objects, but not more accurately. Physical objects outperform both hybrid and virtual objects in terms of speed and accuracy. On the other hand, users felt most stimulated by the virtual objects, followed by the hybrid and physical objects. The study shows that hybrid objects \"work\" in virtual environments, but further investigations regarding the factors influencing their usability are needed.",
"fno": "04142850",
"keywords": [
"Virtual Reality Application",
"Virtual Object",
"Manipulation Task",
"Virtual Environment"
],
"authors": [
{
"affiliation": "Fraunhofer Inst. for Production Syst.&Design Technol., Berlin",
"fullName": "F. Lothar",
"givenName": "F.",
"surname": "Lothar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Fraunhofer Inst. for Production Syst.&Design Technol., Berlin",
"fullName": "J. Habakuk",
"givenName": "J.",
"surname": "Habakuk",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Fraunhofer Inst. for Production Syst.&Design Technol., Berlin",
"fullName": "J. Neumann",
"givenName": "J.",
"surname": "Neumann",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Fraunhofer Inst. for Production Syst.&Design Technol., Berlin",
"fullName": "T. Feldmann-Wustefeld",
"givenName": "T.",
"surname": "Feldmann-Wustefeld",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dui",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2007-03-01T00:00:00",
"pubType": "proceedings",
"pages": "null",
"year": "2007",
"issn": null,
"isbn": "1-4244-0907-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04142849",
"articleId": "12OmNBp52GJ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04142851",
"articleId": "12OmNwc3wsr",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cw/2007/3005/0/04390938",
"title": "MHaptic : a Haptic Manipulation Library for Generic Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2007/04390938/12OmNASraZe",
"parentPublication": {
"id": "proceedings/cw/2007/3005/0",
"title": "2007 International Conference on Cyberworlds (CW'07)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2011/0039/0/05759430",
"title": "A soft hand model for physically-based manipulation of virtual objects",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2011/05759430/12OmNBpEeRU",
"parentPublication": {
"id": "proceedings/vr/2011/0039/0",
"title": "2011 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2001/0981/0/00926545",
"title": "Virtual prototypes in usability testing",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2001/00926545/12OmNC4O4DE",
"parentPublication": {
"id": "proceedings/hicss/2001/0981/2",
"title": "Proceedings of the 34th Annual Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2005/8929/0/01492759",
"title": "Precise and rapid interaction through scaled manipulation in immersive virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2005/01492759/12OmNqHItGo",
"parentPublication": {
"id": "proceedings/vr/2005/8929/0",
"title": "IEEE Virtual Reality 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2001/0981/5/09815029",
"title": "Virtual Prototypes in Usability Testing",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2001/09815029/12OmNwK7o32",
"parentPublication": {
"id": "proceedings/hicss/2001/0981/5",
"title": "Proceedings of the 34th Annual Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2010/6846/0/05444724",
"title": "Revisiting path steering for 3D manipulation tasks",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2010/05444724/12OmNxAlzYX",
"parentPublication": {
"id": "proceedings/3dui/2010/6846/0",
"title": "2010 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cisis/2007/2823/0/04159740",
"title": "Sclable Collaborative Virtual Environment Considering User's Interests Based on P2P Overlay Network",
"doi": null,
"abstractUrl": "/proceedings-article/cisis/2007/04159740/17D45XuDNGn",
"parentPublication": {
"id": "proceedings/cisis/2007/2823/0",
"title": "First International Conference on Complex, Intelligent and Software Intensive Systems (CISIS'07)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699326",
"title": "Evaluation of Direct Manipulation Methods in Augmented Reality Environments Using Google Glass",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699326/19F1Oa8ukP6",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2020/9231/0/923100a067",
"title": "Virtual Reality on Product Usability Testing: A Systematic Literature Review",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2020/923100a067/1oZBCPBt7AA",
"parentPublication": {
"id": "proceedings/svr/2020/9231/0",
"title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a759",
"title": "Do we still need physical monitors? An evaluation of the usability of AR virtual monitors for productivity work",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a759/1tuAI6Ij8is",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyjLoRw",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNy5hRio",
"doi": "10.1109/ISMAR.2014.6948469",
"title": "[Poster] A reconstructive see-through display",
"normalizedTitle": "[Poster] A reconstructive see-through display",
"abstract": "The two most common display technologies used in augmented reality head-mounted displays are optical see-through and video see-through. In this paper I demonstrate a third alternative: reconstructive see-through. By using a commodity depth camera to construct a dense 3D model of the world and rendering this to the user, distracting latency and position discrepancies between real and virtual objects can be reduced.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The two most common display technologies used in augmented reality head-mounted displays are optical see-through and video see-through. In this paper I demonstrate a third alternative: reconstructive see-through. By using a commodity depth camera to construct a dense 3D model of the world and rendering this to the user, distracting latency and position discrepancies between real and virtual objects can be reduced.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The two most common display technologies used in augmented reality head-mounted displays are optical see-through and video see-through. In this paper I demonstrate a third alternative: reconstructive see-through. By using a commodity depth camera to construct a dense 3D model of the world and rendering this to the user, distracting latency and position discrepancies between real and virtual objects can be reduced.",
"fno": "06948469",
"keywords": [
"Cameras",
"Three Dimensional Displays",
"Solid Modeling",
"Rendering Computer Graphics",
"Computational Modeling",
"Jitter",
"Image Color Analysis",
"I 4 8 Image Processing And Computer Vision Scene Analysis Tracking",
"H 5 1 Information Systems Multimedia Information Systems Artificial Augmented Virtual Realities"
],
"authors": [
{
"affiliation": "University of North Carolina at Chapel Hill",
"fullName": "Ky Waegel",
"givenName": "Ky",
"surname": "Waegel",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-09-01T00:00:00",
"pubType": "proceedings",
"pages": "319-320",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-6184-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06948468",
"articleId": "12OmNBZpHbs",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06948470",
"articleId": "12OmNym2bQf",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ozchi/1998/9206/0/92060142",
"title": "See-Through Hand",
"doi": null,
"abstractUrl": "/proceedings-article/ozchi/1998/92060142/12OmNCgJe9U",
"parentPublication": {
"id": "proceedings/ozchi/1998/9206/0",
"title": "Computer-Human Interaction, Australasian Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504755",
"title": "Spatial consistency perception in optical and video see-through head-mounted augmentations",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504755/12OmNqNXEli",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a245",
"title": "[POSTER] An Accurate Calibration Method for Optical See-Through Head-Mounted Displays Based on Actual Eye-Observation Model",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a245/12OmNwErpLb",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2012/4711/0/4711a687",
"title": "See-through Image Enhancement through Sensor Fusion",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2012/4711a687/12OmNx8Ouqu",
"parentPublication": {
"id": "proceedings/icme/2012/4711/0",
"title": "2012 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a052",
"title": "[POSTER] Hybrid Video/Optical See-Through HMD",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a052/12OmNy4r3Ph",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2018/4195/0/08551537",
"title": "Dehazing With A See-Through Near-Eye Display",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2018/08551537/17D45VUZMU4",
"parentPublication": {
"id": "proceedings/icmew/2018/4195/0",
"title": "2018 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08676153",
"title": "Light Attenuation Display: Subtractive See-Through Near-Eye Display via Spatial Color Filtering",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08676153/18LFbQfp6x2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714124",
"title": "Video See-Through Mixed Reality with Focus Cues",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714124/1B0XWyWo5KE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09844860",
"title": "Analysis of the Saliency of Color-Based Dichoptic Cues in Optical See-Through Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09844860/1Fp5UcDqu3K",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a237",
"title": "A Compact Photochromic Occlusion Capable See-through Display with Holographic Lenses",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a237/1MNgTZ7ZNLO",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNx8wTf0",
"title": "2004 International Conference on Cyberworlds",
"acronym": "cw",
"groupId": "1000175",
"volume": "0",
"displayVolume": "0",
"year": "2004",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNz5s0I3",
"doi": "10.1109/CW.2004.11",
"title": "A Prototype of Video See-Through Mixed Reality Interactive System",
"normalizedTitle": "A Prototype of Video See-Through Mixed Reality Interactive System",
"abstract": "Mixed reality (MR), sometimes called enhanced reality, is a variety of virtual environment (VE) which explores various natural environments with immersive display technologies. VE technologies immerse a user completely inside a synthetic environment. By contrast, MR systems add electronic data from cyberspace onto physical space, allowing users to see the real world with virtual objects superimposed upon it. Moreover, MR can assist the users' interaction with the virtual object through the real environment. The objective of our research is to investigate the potential of MR techniques for improving human and computer interaction with scientific data through developing a video-see through MR system. Further applications will be built upon this generic platform.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Mixed reality (MR), sometimes called enhanced reality, is a variety of virtual environment (VE) which explores various natural environments with immersive display technologies. VE technologies immerse a user completely inside a synthetic environment. By contrast, MR systems add electronic data from cyberspace onto physical space, allowing users to see the real world with virtual objects superimposed upon it. Moreover, MR can assist the users' interaction with the virtual object through the real environment. The objective of our research is to investigate the potential of MR techniques for improving human and computer interaction with scientific data through developing a video-see through MR system. Further applications will be built upon this generic platform.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Mixed reality (MR), sometimes called enhanced reality, is a variety of virtual environment (VE) which explores various natural environments with immersive display technologies. VE technologies immerse a user completely inside a synthetic environment. By contrast, MR systems add electronic data from cyberspace onto physical space, allowing users to see the real world with virtual objects superimposed upon it. Moreover, MR can assist the users' interaction with the virtual object through the real environment. The objective of our research is to investigate the potential of MR techniques for improving human and computer interaction with scientific data through developing a video-see through MR system. Further applications will be built upon this generic platform.",
"fno": "21400274",
"keywords": [],
"authors": [
{
"affiliation": "University of Technology Eindhoven, The Netherlands",
"fullName": "Wen Qi",
"givenName": "Wen",
"surname": "Qi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2004-11-01T00:00:00",
"pubType": "proceedings",
"pages": "274-277",
"year": "2004",
"issn": null,
"isbn": "0-7695-2140-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "21400266",
"articleId": "12OmNwsNR9t",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "21400278",
"articleId": "12OmNzC5Tk3",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2008/1971/0/04480797",
"title": "MIRAGE: A Touch Screen based Mixed Reality Interface for Space Planning Applications",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2008/04480797/12OmNwFidfP",
"parentPublication": {
"id": "proceedings/vr/2008/1971/0",
"title": "IEEE Virtual Reality 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wevr/2017/3881/0/07957709",
"title": "Immersive eating: evaluating the use of head-mounted displays for mixed reality meal sessions",
"doi": null,
"abstractUrl": "/proceedings-article/wevr/2017/07957709/12OmNwK7o9G",
"parentPublication": {
"id": "proceedings/wevr/2017/3881/0",
"title": "2017 IEEE 3rd Workshop on Everyday Virtual Reality (WEVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/auic/2001/0969/0/09690029",
"title": "Linking between real and virtual spaces: building the Mixed Reality stage environment",
"doi": null,
"abstractUrl": "/proceedings-article/auic/2001/09690029/12OmNx19k3l",
"parentPublication": {
"id": "proceedings/auic/2001/0969/0",
"title": "Australasian User Interface Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2003/1882/0/18820121",
"title": "Mixed Reality: The Continuum from Virtual to Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2003/18820121/12OmNxw5BxW",
"parentPublication": {
"id": "proceedings/vr/2003/1882/0",
"title": "Proceedings IEEE Virtual Reality 2003",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446463",
"title": "Heterogeneous, Distributed Mixed Reality Applications. A Concept",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446463/13bd1gzWkRh",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2001/06/mcg2001060064",
"title": "Mixed Reality: Future Dreams Seen at the Border between Real and Virtual Worlds",
"doi": null,
"abstractUrl": "/magazine/cg/2001/06/mcg2001060064/13rRUwhpBGt",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a657",
"title": "Mixed Reality for Engineering Design Review Using Finite Element Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a657/1J7WwCL6CCQ",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a044",
"title": "Mixed Reality Tunneling Effects for Stereoscopic Untethered Video-See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a044/1JrR3Kf8QkE",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2019/5023/0/502300e452",
"title": "Assessment of Optical See-Through Head Mounted Display Calibration for Interactive Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2019/502300e452/1i5mlch2zny",
"parentPublication": {
"id": "proceedings/iccvw/2019/5023/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
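The record above describes the core of a video see-through MR system: a head-mounted camera captures the real scene each cycle, virtual content is rendered, and the two are composited before being shown to the user. A minimal sketch of that loop, assuming OpenCV for capture and display; render_virtual_layer() is a hypothetical stand-in for a real renderer and is not from the paper:

```python
# Minimal video see-through compositing loop (illustrative sketch only).
import cv2
import numpy as np

def render_virtual_layer(shape):
    """Hypothetical renderer: returns an RGBA overlay (here, a green square)."""
    h, w = shape[:2]
    overlay = np.zeros((h, w, 4), dtype=np.uint8)
    cv2.rectangle(overlay, (w // 3, h // 3), (2 * w // 3, 2 * h // 3),
                  (0, 255, 0, 180), thickness=-1)
    return overlay

cap = cv2.VideoCapture(0)  # stand-in for the head-mounted camera
while True:
    ok, frame = cap.read()  # real-world view
    if not ok:
        break
    overlay = render_virtual_layer(frame.shape)
    alpha = overlay[:, :, 3:4].astype(np.float32) / 255.0
    # Alpha-blend the virtual layer over the camera image.
    composite = (alpha * overlay[:, :, :3] + (1.0 - alpha) * frame).astype(np.uint8)
    cv2.imshow("video see-through", composite)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
cap.release()
cv2.destroyAllWindows()
```

In a real system the overlay would come from a 3D renderer registered to the tracked head pose; the explicit blend step is what distinguishes video see-through from optical see-through, where compositing happens optically.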
{
"proceeding": {
"id": "12OmNvAiSpZ",
"title": "2015 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzGlRAz",
"doi": "10.1109/VR.2015.7223444",
"title": "Dynamic 3D interaction using an optical See-through HMD",
"normalizedTitle": "Dynamic 3D interaction using an optical See-through HMD",
"abstract": "We propose a system that enables dynamic 3D interaction with real and virtual objects using an optical see-through head-mounted display and an RGB-D camera. The virtual objects move according to physical laws. The system uses a physics engine for calculation of the motion of virtual objects and collision detection. In addition, the system performs collision detection between virtual objects and real objects in the three-dimensional scene obtained from the camera which is dynamically updated. A user wears the device and interacts with virtual objects in a seated position. The system gives users a great sense of reality through an interaction with virtual objects.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a system that enables dynamic 3D interaction with real and virtual objects using an optical see-through head-mounted display and an RGB-D camera. The virtual objects move according to physical laws. The system uses a physics engine for calculation of the motion of virtual objects and collision detection. In addition, the system performs collision detection between virtual objects and real objects in the three-dimensional scene obtained from the camera which is dynamically updated. A user wears the device and interacts with virtual objects in a seated position. The system gives users a great sense of reality through an interaction with virtual objects.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a system that enables dynamic 3D interaction with real and virtual objects using an optical see-through head-mounted display and an RGB-D camera. The virtual objects move according to physical laws. The system uses a physics engine for calculation of the motion of virtual objects and collision detection. In addition, the system performs collision detection between virtual objects and real objects in the three-dimensional scene obtained from the camera which is dynamically updated. A user wears the device and interacts with virtual objects in a seated position. The system gives users a great sense of reality through an interaction with virtual objects.",
"fno": "07223444",
"keywords": [
"Three Dimensional Displays",
"Cameras",
"Collision Avoidance",
"Physics",
"Optical Sensors",
"Engines",
"Virtual Reality",
"H 5 1 Multimedia Information System Artificial Augmented And Virtual Realities"
],
"authors": [
{
"affiliation": "Saitama University",
"fullName": "Nozomi Sugiura",
"givenName": "Nozomi",
"surname": "Sugiura",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Saitama University",
"fullName": "Takashi Komuro",
"givenName": "Takashi",
"surname": "Komuro",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-03-01T00:00:00",
"pubType": "proceedings",
"pages": "359-360",
"year": "2015",
"issn": null,
"isbn": "978-1-4799-1727-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07223443",
"articleId": "12OmNqzu6MP",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07223445",
"articleId": "12OmNzhnafi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2015/7660/0/7660a084",
"title": "[POSTER] Natural 3D Interaction Using a See-Through Mobile AR System",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2015/7660a084/12OmNCcbE15",
"parentPublication": {
"id": "proceedings/ismar/2015/7660/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isar/2000/0846/0/08460165",
"title": "Optical see-through HMD calibration: a stereo method validated with a video see-through system",
"doi": null,
"abstractUrl": "/proceedings-article/isar/2000/08460165/12OmNvlg8jS",
"parentPublication": {
"id": "proceedings/isar/2000/0846/0",
"title": "Augmented Reality, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ichit/2006/2674/1/267410115",
"title": "Detecting collisions in an unstructured environment through path anticipation",
"doi": null,
"abstractUrl": "/proceedings-article/ichit/2006/267410115/12OmNwDSdgS",
"parentPublication": {
"id": "proceedings/ichit/2006/2674/1",
"title": "2006 International Conference on Hybrid Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icimt/2009/3922/0/3922a388",
"title": "A Review of Collision Avoidance Technique for Crowd Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/icimt/2009/3922a388/12OmNxE2mNV",
"parentPublication": {
"id": "proceedings/icimt/2009/3922/0",
"title": "Information and Multimedia Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a052",
"title": "[POSTER] Hybrid Video/Optical See-Through HMD",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a052/12OmNy4r3Ph",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836508",
"title": "A Low Cost Optical See-Through HMD - Do-It-Yourself",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836508/12OmNyKJiwf",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223373",
"title": "Interaction with virtual agents — Comparison of the participants' experience between an IVR and a semi-IVR system",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223373/12OmNyrIaAL",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isuvr/2011/4420/0/4420b025",
"title": "On Visual Artifacts of Physics Simulation in Augmented Reality Environment",
"doi": null,
"abstractUrl": "/proceedings-article/isuvr/2011/4420b025/12OmNzIUfTU",
"parentPublication": {
"id": "proceedings/isuvr/2011/4420/0",
"title": "International Symposium on Ubiquitous Virtual Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvris/2018/8031/0/803100a219",
"title": "Research on Fast Collision Detection Algorithm Based on CPU Cache Technology",
"doi": null,
"abstractUrl": "/proceedings-article/icvris/2018/803100a219/17D45WXIkEX",
"parentPublication": {
"id": "proceedings/icvris/2018/8031/0",
"title": "2018 International Conference on Virtual Reality and Intelligent Systems (ICVRIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccea/2020/5904/0/09103751",
"title": "Design and Implementation of Collision Module in Virtual Reality Scene",
"doi": null,
"abstractUrl": "/proceedings-article/iccea/2020/09103751/1kesE1Smlpe",
"parentPublication": {
"id": "proceedings/iccea/2020/5904/0",
"title": "2020 International Conference on Computer Engineering and Application (ICCEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
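The abstract above hinges on two per-frame steps: integrating the virtual objects under a physics engine, then collision-testing them against real geometry recovered from the RGB-D camera. A toy sketch of that loop using NumPy only; the depth-derived height field and all constants are made-up stand-ins, not the authors' system:

```python
# Toy version of the paper's loop: integrate a virtual object under
# gravity each frame, then collision-test it against real geometry
# captured as a height field from an RGB-D depth image.
import numpy as np

# Stand-in for a depth frame converted to surface heights (metres).
heights = np.zeros((480, 640))
heights[200:280, 300:380] = 0.3      # a real box, 30 cm tall

pos = np.array([320.0, 240.0, 1.0])  # virtual ball at (x_px, y_px, z_m)
vel = np.array([0.0, 0.0, 0.0])
dt, g, restitution = 1 / 60.0, -9.81, 0.5

for _ in range(300):                 # one physics step per camera frame
    vel[2] += g * dt
    pos[2] += vel[2] * dt
    floor = heights[int(pos[1]), int(pos[0])]  # real surface under the ball
    if pos[2] < floor:               # collision with real geometry
        pos[2] = floor
        vel[2] = -restitution * vel[2]
print(f"ball settles at z = {pos[2]:.3f} m")   # ~0.300: resting on the box
```

The real system replaces the static height field with a scene re-acquired every frame, which is what lets virtual objects react when the user moves real ones.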
{
"proceeding": {
"id": "19F1LC52tjO",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "19F1UA1hw40",
"doi": "10.1109/ISMAR-Adjunct.2018.00056",
"title": "Inverse Augmented Reality: A Virtual Agent's Perspective",
"normalizedTitle": "Inverse Augmented Reality: A Virtual Agent's Perspective",
"abstract": "We propose a framework called inverse augmented reality (IAR) which describes the scenario that a virtual agent living in the virtual world can observe both virtual objects and real objects. This is different from the traditional augmented reality. The traditional virtual reality, mixed reality and augmented reality are all generated for humans, i.e., they are human-centered frameworks. On the contrary, the proposed inverse augmented reality is a virtual agent-centered framework, which represents and analyzes the reality from a virtual agent's perspective. In this paper, we elaborate the framework of inverse augmented reality to argue the equivalence of the virtual world and the physical world regarding the whole physical structure.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a framework called inverse augmented reality (IAR) which describes the scenario that a virtual agent living in the virtual world can observe both virtual objects and real objects. This is different from the traditional augmented reality. The traditional virtual reality, mixed reality and augmented reality are all generated for humans, i.e., they are human-centered frameworks. On the contrary, the proposed inverse augmented reality is a virtual agent-centered framework, which represents and analyzes the reality from a virtual agent's perspective. In this paper, we elaborate the framework of inverse augmented reality to argue the equivalence of the virtual world and the physical world regarding the whole physical structure.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a framework called inverse augmented reality (IAR) which describes the scenario that a virtual agent living in the virtual world can observe both virtual objects and real objects. This is different from the traditional augmented reality. The traditional virtual reality, mixed reality and augmented reality are all generated for humans, i.e., they are human-centered frameworks. On the contrary, the proposed inverse augmented reality is a virtual agent-centered framework, which represents and analyzes the reality from a virtual agent's perspective. In this paper, we elaborate the framework of inverse augmented reality to argue the equivalence of the virtual world and the physical world regarding the whole physical structure.",
"fno": "08699316",
"keywords": [
"Augmented Reality",
"Human Computer Interaction",
"Multi Agent Systems",
"Inverse Augmented Reality",
"Virtual Agent Centered Framework",
"Virtual World",
"Virtual Objects",
"Traditional Augmented Reality",
"Traditional Virtual Reality",
"Mixed Reality",
"IAR",
"Human Centered Frameworks",
"Augmented Reality",
"Physics",
"Virtual Environments",
"Evolution Biology",
"Augmented Virtuality",
"Mathematical Model",
"Augmented Reality",
"Inverse",
"Virtual Agent"
],
"authors": [
{
"affiliation": "Beijing Institute of Technology, Beijing Engineering Research Center of Mixed Reality and Advanced Display School of Optics and Photonics, Beijing, China",
"fullName": "Zhenliang Zhang",
"givenName": "Zhenliang",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Optics and Photonics Beijing Institute of Technology, Beijing Engineering Research Center of Mixed Reality and Advanced Display, Beijing, China",
"fullName": "Dongdong Weng",
"givenName": "Dongdong",
"surname": "Weng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Optics and Photonics Beijing Institute of Technology, Beijing Engineering Research Center of Mixed Reality and Advanced Display, Beijing, China",
"fullName": "Haiyan Jiang",
"givenName": "Haiyan",
"surname": "Jiang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Optics and Photonics Beijing Institute of Technology, Beijing Engineering Research Center of Mixed Reality and Advanced Display, Beijing, China",
"fullName": "Yue Liu",
"givenName": "Yue",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Optics and Photonics Beijing Institute of Technology, Beijing Engineering Research Center of Mixed Reality and Advanced Display, Beijing, China",
"fullName": "Yongtian Wang",
"givenName": "Yongtian",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-10-01T00:00:00",
"pubType": "proceedings",
"pages": "154-157",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-7592-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08699183",
"articleId": "19F1MWRWSqs",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08699238",
"articleId": "19F1SZ9ch0I",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar-amh/2009/5508/0/05336726",
"title": "Loosely-coupled mixed reality: Using the environment metaphorically",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-amh/2009/05336726/12OmNCbU2Wk",
"parentPublication": {
"id": "proceedings/ismar-amh/2009/5508/0",
"title": "2009 IEEE International Symposium on Mixed and Augmented Reality - Arts, Media and Humanities",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isuvr/2008/3259/0/3259a037",
"title": "Introduction of Physics Simulation in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/isuvr/2008/3259a037/12OmNvB9Fyb",
"parentPublication": {
"id": "proceedings/isuvr/2008/3259/0",
"title": "International Symposium on Ubiquitous Virtual Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcs/1999/0253/1/02539195",
"title": "Haptics in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/icmcs/1999/02539195/12OmNyQ7G3s",
"parentPublication": {
"id": "proceedings/icmcs/1999/0253/1",
"title": "Multimedia Computing and Systems, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvvrhc/1998/8283/0/82830078",
"title": "Vision and Graphics in Producing Mixed Reality Worlds",
"doi": null,
"abstractUrl": "/proceedings-article/cvvrhc/1998/82830078/12OmNylbov1",
"parentPublication": {
"id": "proceedings/cvvrhc/1998/8283/0",
"title": "Computer Vision for Virtual Reality Based Human Communications, Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isuvr/2011/4420/0/4420b025",
"title": "On Visual Artifacts of Physics Simulation in Augmented Reality Environment",
"doi": null,
"abstractUrl": "/proceedings-article/isuvr/2011/4420b025/12OmNzIUfTU",
"parentPublication": {
"id": "proceedings/isuvr/2011/4420/0",
"title": "International Symposium on Ubiquitous Virtual Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/pc/2009/03/mpc2009030002",
"title": "Through Tinted Eyeglasses",
"doi": null,
"abstractUrl": "/magazine/pc/2009/03/mpc2009030002/13rRUx0xPki",
"parentPublication": {
"id": "mags/pc",
"title": "IEEE Pervasive Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2000/04/v0346",
"title": "Calibration-Free Augmented Reality in Perspective",
"doi": null,
"abstractUrl": "/journal/tg/2000/04/v0346/13rRUxOdD2s",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a327",
"title": "Augmented Virtuality Training for Special Education Teachers",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a327/1J7WbAdfchq",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797970",
"title": "Symmetrical Reality: Toward a Unified Framework for Physical and Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797970/1cJ1g9XNkty",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a548",
"title": "Inspiring healthy Food Choices in a Virtual Reality Supermarket by adding a tangible Dimension in the Form of an Augmented Virtuality Smartphone",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a548/1tnXLta99Vm",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1grOKVFffCo",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1grOMiBQIZq",
"doi": "10.1109/ISMAR.2019.00-13",
"title": "Studying Exocentric Distance Perception in Optical See-Through Augmented Reality",
"normalizedTitle": "Studying Exocentric Distance Perception in Optical See-Through Augmented Reality",
"abstract": "While perceptual biases have been widely investigated in Virtual Reality (VR), very few studies have considered the challenging environment of Optical See-through Augmented Reality (OST-AR). Moreover, regarding distance perception, existing works mainly focus on the assessment of egocentric distance perception, i.e. distance between the observer and a real or a virtual object. In this paper, we study exocentric distance perception in AR, hereby considered as the distance between two objects, none of them being directly linked to the user. We report a user study (n=29) aiming at estimating distances between two objects lying in a frontoparallel plane at 2.1m from the observer (i.e. in the medium-field perceptual space). Four conditions were tested in our study: real objects on the left and on the right of the participant (called real-real), virtual objects on both sides (virtual-virtual), a real object on the left and a virtual one on the right (real-virtual) and finally a virtual object on the left and a real object on the right (virtual-real). Participants had to reproduce the distance between the objects by spreading two real identical objects presented in front of them. The main findings of this study are the overestimation (20%) of exocentric distances for all tested conditions. Surprisingly, the real-real condition was significantly more overestimated (by about 4%, p=.0166) compared to the virtual-virtual condition, i.e. participants obtained better estimates of the exocentric distance for the virtual-virtual condition. Finally, for the virtual-real/real-virtual conditions, the analysis showed a non-symmetrical behavior, which suggests that the relationship between real and virtual objects with respect to the user might be affected by other external factors. Considered together, these unexpected results illustrate the need for additional experiments to better understand the perceptual phenomena involved in exocentric distance perception with real and virtual objects.",
"abstracts": [
{
"abstractType": "Regular",
"content": "While perceptual biases have been widely investigated in Virtual Reality (VR), very few studies have considered the challenging environment of Optical See-through Augmented Reality (OST-AR). Moreover, regarding distance perception, existing works mainly focus on the assessment of egocentric distance perception, i.e. distance between the observer and a real or a virtual object. In this paper, we study exocentric distance perception in AR, hereby considered as the distance between two objects, none of them being directly linked to the user. We report a user study (n=29) aiming at estimating distances between two objects lying in a frontoparallel plane at 2.1m from the observer (i.e. in the medium-field perceptual space). Four conditions were tested in our study: real objects on the left and on the right of the participant (called real-real), virtual objects on both sides (virtual-virtual), a real object on the left and a virtual one on the right (real-virtual) and finally a virtual object on the left and a real object on the right (virtual-real). Participants had to reproduce the distance between the objects by spreading two real identical objects presented in front of them. The main findings of this study are the overestimation (20%) of exocentric distances for all tested conditions. Surprisingly, the real-real condition was significantly more overestimated (by about 4%, p=.0166) compared to the virtual-virtual condition, i.e. participants obtained better estimates of the exocentric distance for the virtual-virtual condition. Finally, for the virtual-real/real-virtual conditions, the analysis showed a non-symmetrical behavior, which suggests that the relationship between real and virtual objects with respect to the user might be affected by other external factors. Considered together, these unexpected results illustrate the need for additional experiments to better understand the perceptual phenomena involved in exocentric distance perception with real and virtual objects.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "While perceptual biases have been widely investigated in Virtual Reality (VR), very few studies have considered the challenging environment of Optical See-through Augmented Reality (OST-AR). Moreover, regarding distance perception, existing works mainly focus on the assessment of egocentric distance perception, i.e. distance between the observer and a real or a virtual object. In this paper, we study exocentric distance perception in AR, hereby considered as the distance between two objects, none of them being directly linked to the user. We report a user study (n=29) aiming at estimating distances between two objects lying in a frontoparallel plane at 2.1m from the observer (i.e. in the medium-field perceptual space). Four conditions were tested in our study: real objects on the left and on the right of the participant (called real-real), virtual objects on both sides (virtual-virtual), a real object on the left and a virtual one on the right (real-virtual) and finally a virtual object on the left and a real object on the right (virtual-real). Participants had to reproduce the distance between the objects by spreading two real identical objects presented in front of them. The main findings of this study are the overestimation (20%) of exocentric distances for all tested conditions. Surprisingly, the real-real condition was significantly more overestimated (by about 4%, p=.0166) compared to the virtual-virtual condition, i.e. participants obtained better estimates of the exocentric distance for the virtual-virtual condition. Finally, for the virtual-real/real-virtual conditions, the analysis showed a non-symmetrical behavior, which suggests that the relationship between real and virtual objects with respect to the user might be affected by other external factors. Considered together, these unexpected results illustrate the need for additional experiments to better understand the perceptual phenomena involved in exocentric distance perception with real and virtual objects.",
"fno": "08943756",
"keywords": [
"Perception",
"Distance",
"Augmented Reality",
"User Experiment",
"Psychophysical Study"
],
"authors": [
{
"affiliation": "École Centrale de Nantes, Inria",
"fullName": "Etienne Peillard",
"givenName": "Etienne",
"surname": "Peillard",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inria",
"fullName": "Ferran Argelaguet",
"givenName": "Ferran",
"surname": "Argelaguet",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "École Centrale de Nantes",
"fullName": "Jean-Marie Normand",
"givenName": "Jean-Marie",
"surname": "Normand",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inria",
"fullName": "Anatole Lécuyer",
"givenName": "Anatole",
"surname": "Lécuyer",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "École Centrale de Nantes",
"fullName": "Guillaume Moreau",
"givenName": "Guillaume",
"surname": "Moreau",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "115-122",
"year": "2019",
"issn": "1554-7868",
"isbn": "978-1-7281-0987-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08943683",
"articleId": "1grOLPil98k",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08943738",
"articleId": "1grOMePFEGc",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2012/4660/0/06402556",
"title": "Tablet versus phone: Depth perception in handheld augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402556/12OmNBQ2VVh",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2006/0224/0/02240003",
"title": "Distance Perception in Immersive Virtual Environments, Revisited",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2006/02240003/12OmNvm6VKz",
"parentPublication": {
"id": "proceedings/vr/2006/0224/0",
"title": "IEEE Virtual Reality Conference (VR 2006)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2003/2006/0/20060198",
"title": "Consistent Illumination within Optical See-Through Augmented Environments",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2003/20060198/12OmNwdtwaC",
"parentPublication": {
"id": "proceedings/ismar/2003/2006/0",
"title": "The Second IEEE and ACM International Symposium on Mixed and Augmented Reality, 2003. Proceedings.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/03/v0429",
"title": "Egocentric Depth Judgments in Optical, See-Through Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/2007/03/v0429/13rRUxYrbM5",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797826",
"title": "Virtual Objects Look Farther on the Sides: The Anisotropy of Distance Perception in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797826/1cJ18Y9D9Di",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090527",
"title": "Distance Perception in Modern Mobile Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090527/1jIxsZjczAc",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2009/3943/0/04811002",
"title": "Improving Spatial Perception for Augmented Reality X-Ray Vision",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04811002/1t0I1sqXnl6",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a122",
"title": "Augmented Reality for Maritime Navigation Assistance - Egocentric Depth Perception in Large Distance Outdoor Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a122/1tuB9Rs5D2M",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523839",
"title": "Virtual extensions improve perception-based instrument alignment using optical see-through devices",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523839/1wpqu0pXouA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a082",
"title": "Comparing Distance Judgments in Real and Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a082/1yfxMk2JFHW",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
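For readers unfamiliar with the error measure in the study above: exocentric bias is the signed relative difference between the reproduced and the true inter-object distance, so +20% means participants spread the response objects about 20% too far apart. A small worked example with made-up numbers (the paper's raw data are not reproduced here):

```python
# Worked example of the overestimation measure: signed relative error
# between reproduced and actual separations. All values are illustrative.
import numpy as np

actual = np.array([0.40, 0.60, 0.80])      # true separations (m), hypothetical
reproduced = np.array([0.49, 0.71, 0.97])  # participant responses, hypothetical

bias = (reproduced - actual) / actual      # signed relative error per trial
print(f"mean overestimation: {bias.mean():+.1%}")  # +20.7% with these numbers
```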
{
"proceeding": {
"id": "1yeCSUXkdhu",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yeCYy4wcZa",
"doi": "10.1109/ISMAR52148.2021.00029",
"title": "Investigation of Size Variations in Optical See-through Tangible Augmented Reality",
"normalizedTitle": "Investigation of Size Variations in Optical See-through Tangible Augmented Reality",
"abstract": "Optical see-through AR headsets are becoming increasingly attractive for many applications. Interaction with the virtual content is usually achieved via hand gestures or with controllers. A more seamless interaction between the real and virtual world can be achieved by using tangible objects to manipulate the virtual content. Instead of interacting with detailed physical replicas, working with abstractions allows a single physical object to represent a variety of virtual objects. These abstractions would differ from their virtual representations in shape, size, texture and material. This paper investigates for the first time in optical see-through AR whether size variations are possible without major losses in performance, usability and immersion. The conducted study shows that size can be varied within a limited range without significantly affecting task completion times as well as feelings of disturbance and presence. Stronger size deviations are possible for physical objects smaller than the virtual object than for larger physical objects.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Optical see-through AR headsets are becoming increasingly attractive for many applications. Interaction with the virtual content is usually achieved via hand gestures or with controllers. A more seamless interaction between the real and virtual world can be achieved by using tangible objects to manipulate the virtual content. Instead of interacting with detailed physical replicas, working with abstractions allows a single physical object to represent a variety of virtual objects. These abstractions would differ from their virtual representations in shape, size, texture and material. This paper investigates for the first time in optical see-through AR whether size variations are possible without major losses in performance, usability and immersion. The conducted study shows that size can be varied within a limited range without significantly affecting task completion times as well as feelings of disturbance and presence. Stronger size deviations are possible for physical objects smaller than the virtual object than for larger physical objects.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Optical see-through AR headsets are becoming increasingly attractive for many applications. Interaction with the virtual content is usually achieved via hand gestures or with controllers. A more seamless interaction between the real and virtual world can be achieved by using tangible objects to manipulate the virtual content. Instead of interacting with detailed physical replicas, working with abstractions allows a single physical object to represent a variety of virtual objects. These abstractions would differ from their virtual representations in shape, size, texture and material. This paper investigates for the first time in optical see-through AR whether size variations are possible without major losses in performance, usability and immersion. The conducted study shows that size can be varied within a limited range without significantly affecting task completion times as well as feelings of disturbance and presence. Stronger size deviations are possible for physical objects smaller than the virtual object than for larger physical objects.",
"fno": "015800a147",
"keywords": [
"Augmented Reality",
"Size Variations",
"Optical See Through Tangible Augmented Reality",
"Virtual Content",
"Hand Gestures",
"Seamless Interaction",
"Real World",
"Virtual World",
"Tangible Objects",
"Detailed Physical Replicas",
"Abstractions",
"Single Physical Object",
"Virtual Object",
"Virtual Representations",
"Size Deviations",
"Larger Physical Objects",
"Optical Losses",
"Headphones",
"Shape",
"Fitting",
"Lighting",
"Estimation",
"Task Analysis",
"Tangible Augmented Reality",
"Optical See Through Augmented Reality",
"Tangible Interaction",
"Haptic Devices",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Paradigms",
"Mixed Augmented Reality",
"Interaction Devices",
"Haptic Devices"
],
"authors": [
{
"affiliation": "Saarland Informatics Campus,German Research Center for Artificial Intelligence (DFKI),Saarbrücken,Germany",
"fullName": "Denise Kahl",
"givenName": "Denise",
"surname": "Kahl",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Saarland Informatics Campus,German Research Center for Artificial Intelligence (DFKI),Saarbrücken,Germany",
"fullName": "Marc Ruble",
"givenName": "Marc",
"surname": "Ruble",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Saarland Informatics Campus,German Research Center for Artificial Intelligence (DFKI),Saarbrücken,Germany",
"fullName": "Antonio Krüger",
"givenName": "Antonio",
"surname": "Krüger",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "147-155",
"year": "2021",
"issn": "1554-7868",
"isbn": "978-1-6654-0158-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1yeCY8mY8KY",
"name": "pismar202101580-09583793s1-mm_015800a147.zip",
"size": "76.6 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pismar202101580-09583793s1-mm_015800a147.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "015800a138",
"articleId": "1yeD4ffM0c8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "015800a156",
"articleId": "1yeD1Xtq86c",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2016/3641/0/3641a100",
"title": "TactileVR: Integrating Physical Toys into Learn and Play Virtual Reality Experiences",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2016/3641a100/12OmNvFHfKP",
"parentPublication": {
"id": "proceedings/ismar/2016/3641/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2008/1971/0/04480753",
"title": "Symmetric Model of Remote Collaborative MR Using Tangible Replicas",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2008/04480753/12OmNyL0TDr",
"parentPublication": {
"id": "proceedings/vr/2008/1971/0",
"title": "IEEE Virtual Reality 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223444",
"title": "Dynamic 3D interaction using an optical See-through HMD",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223444/12OmNzGlRAz",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446441",
"title": "BrightView: Increasing Perceived Brightness of Optical See-Through Head-Mounted Displays Through Unnoticeable Incident Light Reduction",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446441/13bd1sv5NxY",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/04/07383324",
"title": "Effects of Configuration of Optical Combiner on Near-Field Depth Perception in Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2016/04/07383324/13rRUwI5Ugg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a121",
"title": "The Influence of Environmental Lighting on Size Variations in Optical See-through Tangible Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a121/1CJc3FU1jUc",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10050791",
"title": "Add-on Occlusion: Turning Off-the-Shelf Optical See-through Head-mounted Displays Occlusion-capable",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10050791/1L039oS5wDm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09463728",
"title": "Color Contrast Enhanced Rendering for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09463728/1uFxo1ImlpK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a256",
"title": "Rotation-constrained optical see-through headset calibration with bare-hand alignment",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a256/1yeD14AjfEI",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a384",
"title": "An Empirical Study of Size Discrimination in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a384/1yeQWO0csfe",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1G9DtzCwrjW",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1G9EEMQjNLO",
"doi": "10.1109/ICME52920.2022.9859694",
"title": "Structural Attention for Channel-Wise Adaptive Graph Convolution in Skeleton-Based Action Recognition",
"normalizedTitle": "Structural Attention for Channel-Wise Adaptive Graph Convolution in Skeleton-Based Action Recognition",
"abstract": "In skeleton-based action recognition, graph convolutions to model human action dynamics have been widely implemented and achieved remarkable results. Among these convolutions, channel-wise adaptive graph convolution shows outstanding performance. However, this method focuses too much on capturing correlation between joints within each channel and lacks the capability of learning structural features, which are generally hidden in geometric property of the skeleton on spatial domain. Our proposed method (SA-GCN) introduces symmetry trajectory attention module to measure the relation between left and right part of body and part relation attention module for exploration of the attention on general relation of each part. Both modules are intended to make full use of structural features in skeleton, further strengthening advantages of graph convolution. Experiments on three datasets (NW-UCLA, NTU-RGB+D and NTU-RGB+D 120) demonstrate state-of-the-art performance of our model, especially on joint modality.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In skeleton-based action recognition, graph convolutions to model human action dynamics have been widely implemented and achieved remarkable results. Among these convolutions, channel-wise adaptive graph convolution shows outstanding performance. However, this method focuses too much on capturing correlation between joints within each channel and lacks the capability of learning structural features, which are generally hidden in geometric property of the skeleton on spatial domain. Our proposed method (SA-GCN) introduces symmetry trajectory attention module to measure the relation between left and right part of body and part relation attention module for exploration of the attention on general relation of each part. Both modules are intended to make full use of structural features in skeleton, further strengthening advantages of graph convolution. Experiments on three datasets (NW-UCLA, NTU-RGB+D and NTU-RGB+D 120) demonstrate state-of-the-art performance of our model, especially on joint modality.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In skeleton-based action recognition, graph convolutions to model human action dynamics have been widely implemented and achieved remarkable results. Among these convolutions, channel-wise adaptive graph convolution shows outstanding performance. However, this method focuses too much on capturing correlation between joints within each channel and lacks the capability of learning structural features, which are generally hidden in geometric property of the skeleton on spatial domain. Our proposed method (SA-GCN) introduces symmetry trajectory attention module to measure the relation between left and right part of body and part relation attention module for exploration of the attention on general relation of each part. Both modules are intended to make full use of structural features in skeleton, further strengthening advantages of graph convolution. Experiments on three datasets (NW-UCLA, NTU-RGB+D and NTU-RGB+D 120) demonstrate state-of-the-art performance of our model, especially on joint modality.",
"fno": "09859694",
"keywords": [
"Feature Extraction",
"Graph Theory",
"Image Motion Analysis",
"Image Recognition",
"Image Representation",
"Learning Artificial Intelligence",
"Object Recognition",
"Structural Attention",
"Channel Wise Adaptive Graph Convolution",
"Skeleton Based Action Recognition",
"Model Human Action Dynamics",
"Structural Features",
"Symmetry Trajectory Attention Module",
"Representation Learning",
"Adaptation Models",
"Symmetric Matrices",
"Correlation",
"Skeleton",
"Trajectory",
"Skeleton Based Action Recognition",
"Graph Convolutional Networks",
"Structural Attention Modules"
],
"authors": [
{
"affiliation": "School of Software Engineering, Tongji University,China",
"fullName": "Ruihao Qian",
"givenName": "Ruihao",
"surname": "Qian",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Software Engineering, Tongji University,China",
"fullName": "Jiewen Wang",
"givenName": "Jiewen",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "College of Civil Engineering, Tongji University, China Key Laboratory of Geotechnical and Underground Engineering of Ministry of Education, Tongji University,China",
"fullName": "Jianxiu Wang",
"givenName": "Jianxiu",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Software Engineering, Tongji University,China",
"fullName": "Shuang Liang",
"givenName": "Shuang",
"surname": "Liang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8563-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09859615",
"articleId": "1G9EwjeSST6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09859843",
"articleId": "1G9ECrBEMta",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmew/2017/0560/0/08026285",
"title": "Skeleton-based action recognition with convolutional neural networks",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2017/08026285/12OmNxFaLl9",
"parentPublication": {
"id": "proceedings/icmew/2017/0560/0",
"title": "2017 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2021/3176/0/09666948",
"title": "Skeleton-based Action Recognition for Human-Robot Interaction using Self-Attention Mechanism",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2021/09666948/1A6BxEU8Rpu",
"parentPublication": {
"id": "proceedings/fg/2021/3176/0",
"title": "2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200n3339",
"title": "Channel-wise Topology Refinement Graph Convolution for Skeleton-Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200n3339/1BmGFqMcFvW",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859595",
"title": "Skeletal Twins: Unsupervised Skeleton-Based Action Representation Learning",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859595/1G9DMR0cmd2",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859589",
"title": "When Skeleton Meets Appearance: Adaptive Appearance Information Enhancement for Skeleton Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859589/1G9EoJsKC76",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600u0154",
"title": "InfoGCN: Representation Learning for Human Skeleton-based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600u0154/1H1jIT4zVGE",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049725",
"title": "Skeleton-based Human Action Recognition via Large-kernel Attention Graph Convolutional Network",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049725/1KYorCgCMLe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isctt/2020/8575/0/857500a183",
"title": "Human Skeleton Graph Attention Convolutional for Video Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/isctt/2020/857500a183/1rHeLWELQru",
"parentPublication": {
"id": "proceedings/isctt/2020/8575/0",
"title": "2020 5th International Conference on Information Science, Computer Technology and Transportation (ISCTT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2020/1485/0/148500a474",
"title": "Multi-Relational Graph Convolutional Networks for Skeleton-Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/ispa-bdcloud-socialcom-sustaincom/2020/148500a474/1ua4JGNoj9S",
"parentPublication": {
"id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2020/1485/0",
"title": "2020 IEEE Intl Conf on Parallel & Distributed Processing with Applications, Big Data & Cloud Computing, Sustainable Computing & Communications, Social Computing & Networking (ISPA/BDCloud/SocialCom/SustainCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2021/3864/0/09428403",
"title": "Recurrent Graph Convolutional Autoencoder for Unsupervised Skeleton-Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2021/09428403/1uilAM4szPW",
"parentPublication": {
"id": "proceedings/icme/2021/3864/0",
"title": "2021 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
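The keywords in the record above name a channel-wise adaptive graph convolution for skeleton-based action recognition. As a rough illustration only (the record's full text is not shown here, and every module name and shape below is an assumption rather than the paper's code), such a layer typically learns a shared joint topology plus a per-channel, data-dependent refinement:

```python
# Hypothetical sketch of a channel-wise adaptive graph convolution
# over skeleton joints; names and shapes are illustrative assumptions.
import torch
import torch.nn as nn

class ChannelWiseAdaptiveGraphConv(nn.Module):
    def __init__(self, in_channels, out_channels, num_joints):
        super().__init__()
        # Shared, learnable base topology over the skeleton joints.
        self.base_adj = nn.Parameter(torch.eye(num_joints))
        # 1x1 projections used to infer a per-channel topology refinement.
        self.theta = nn.Conv2d(in_channels, out_channels, kernel_size=1)
        self.phi = nn.Conv2d(in_channels, out_channels, kernel_size=1)
        self.proj = nn.Conv2d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        # x: (batch, channels, frames, joints)
        q = self.theta(x).mean(dim=2)  # (B, C_out, V), pooled over time
        k = self.phi(x).mean(dim=2)    # (B, C_out, V)
        # One V x V joint-to-joint affinity map per output channel.
        refine = torch.tanh(q.unsqueeze(-1) - k.unsqueeze(-2))  # (B, C_out, V, V)
        adj = self.base_adj + refine   # broadcast the shared topology
        feat = self.proj(x)            # (B, C_out, T, V)
        # Aggregate neighbor features with the per-channel adjacency.
        return torch.einsum('bcuv,bctv->bctu', adj, feat)

# x = torch.randn(8, 3, 64, 25)  # 64-frame, 25-joint skeleton batch
# y = ChannelWiseAdaptiveGraphConv(3, 16, 25)(x)  # -> (8, 16, 64, 25)
```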
{
"proceeding": {
"id": "1G9DtzCwrjW",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1G9EoJsKC76",
"doi": "10.1109/ICME52920.2022.9859589",
"title": "When Skeleton Meets Appearance: Adaptive Appearance Information Enhancement for Skeleton Based Action Recognition",
"normalizedTitle": "When Skeleton Meets Appearance: Adaptive Appearance Information Enhancement for Skeleton Based Action Recognition",
"abstract": "Skeleton-based action recognition methods which utilize graph convolution networks (GCNs) have achieved remark-able success in recent years. However, action recognizer can be easily confused by the ambiguity caused by different actions with similar skeleton sequences when only skeleton data is trained. Introducing appearance information can effectively eliminate the ambiguity. Based on this, we introduce a two-stream network for action recognition. One trained on RGB images extracts appearance information. The other trained on skeleton data models motion information and adaptively captures appearance information of action areas at action-related intervals via a specially tailored attention mechanism. Our architecture is trained and evaluated on two large-scale datasets: NTU RGB+D and NTU RGB+D 120, and a small scale human-object interaction dataset Northwestern-UCLA. Experiment results verify the effectiveness of our method and the performance of our method exceeds the state-of-the-art with a significant margin.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Skeleton-based action recognition methods which utilize graph convolution networks (GCNs) have achieved remark-able success in recent years. However, action recognizer can be easily confused by the ambiguity caused by different actions with similar skeleton sequences when only skeleton data is trained. Introducing appearance information can effectively eliminate the ambiguity. Based on this, we introduce a two-stream network for action recognition. One trained on RGB images extracts appearance information. The other trained on skeleton data models motion information and adaptively captures appearance information of action areas at action-related intervals via a specially tailored attention mechanism. Our architecture is trained and evaluated on two large-scale datasets: NTU RGB+D and NTU RGB+D 120, and a small scale human-object interaction dataset Northwestern-UCLA. Experiment results verify the effectiveness of our method and the performance of our method exceeds the state-of-the-art with a significant margin.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Skeleton-based action recognition methods which utilize graph convolution networks (GCNs) have achieved remark-able success in recent years. However, action recognizer can be easily confused by the ambiguity caused by different actions with similar skeleton sequences when only skeleton data is trained. Introducing appearance information can effectively eliminate the ambiguity. Based on this, we introduce a two-stream network for action recognition. One trained on RGB images extracts appearance information. The other trained on skeleton data models motion information and adaptively captures appearance information of action areas at action-related intervals via a specially tailored attention mechanism. Our architecture is trained and evaluated on two large-scale datasets: NTU RGB+D and NTU RGB+D 120, and a small scale human-object interaction dataset Northwestern-UCLA. Experiment results verify the effectiveness of our method and the performance of our method exceeds the state-of-the-art with a significant margin.",
"fno": "09859589",
"keywords": [
"Feature Extraction",
"Image Colour Analysis",
"Image Motion Analysis",
"Image Recognition",
"Image Representation",
"Image Sensors",
"Image Sequences",
"Learning Artificial Intelligence",
"Object Recognition",
"Video Signal Processing",
"Skeleton Meets Appearance",
"Adaptive Appearance Information Enhancement",
"Skeleton Based Action Recognition",
"Skeleton Based Action Recognition Methods",
"Graph Convolution Networks",
"Remark Able Success",
"Action Recognizer",
"Similar Skeleton Sequences",
"Two Stream Network",
"RGB Images Extracts Appearance Information",
"Skeleton Data Models Motion Information",
"Action Areas",
"Action Related Intervals",
"Scale Human Object Interaction Dataset Northwestern UCLA",
"Image Recognition",
"Adaptive Systems",
"Convolution",
"Streaming Media",
"Skeleton",
"Data Models",
"Data Mining",
"Action Recognition",
"Skeleton Data",
"RGB Images",
"Attention"
],
"authors": [
{
"affiliation": "Institute of Automation, Chinese Academy of Sciences,National Laboratory of Pattern Recognition,Beijing,China",
"fullName": "Suqin Wang",
"givenName": "Suqin",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute of Automation, Chinese Academy of Sciences,National Laboratory of Pattern Recognition,Beijing,China",
"fullName": "Lu Zhou",
"givenName": "Lu",
"surname": "Zhou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute of Automation, Chinese Academy of Sciences,National Laboratory of Pattern Recognition,Beijing,China",
"fullName": "Yingying Chen",
"givenName": "Yingying",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Army Medical University, NCO School of PLA",
"fullName": "Jiangtao Huo",
"givenName": "Jiangtao",
"surname": "Huo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute of Automation, Chinese Academy of Sciences,National Laboratory of Pattern Recognition,Beijing,China",
"fullName": "Jinqiao Wang",
"givenName": "Jinqiao",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8563-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09859813",
"articleId": "1G9DUJsaqyc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09859851",
"articleId": "1G9DZrLBdrq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ism/2021/3734/0/373400a087",
"title": "A two-stream heterogeneous network for action recognition based on skeleton and RGB modalities",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2021/373400a087/1A3j4pziBTq",
"parentPublication": {
"id": "proceedings/ism/2021/3734/0",
"title": "2021 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600u0154",
"title": "InfoGCN: Representation Learning for Human Skeleton-based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600u0154/1H1jIT4zVGE",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscer/2022/8478/0/847800a208",
"title": "Inception Spatial Temporal Graph Convolutional Networks for Skeleton-Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/iscer/2022/847800a208/1HbbBGVP8mk",
"parentPublication": {
"id": "proceedings/iscer/2022/8478/0",
"title": "2022 International Symposium on Control Engineering and Robotics (ISCER)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2018/8497/0/849700a074",
"title": "Skeleton Capsule Net: An Efficient Network for Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2018/849700a074/1a3x5zJo5uU",
"parentPublication": {
"id": "proceedings/icvrv/2018/8497/0",
"title": "2018 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/06/09234715",
"title": "Learning Multi-View Interactional Skeleton Graph for Action Recognition",
"doi": null,
"abstractUrl": "/journal/tp/2023/06/09234715/1o6HiTWfQkg",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccst/2020/8138/0/813800a240",
"title": "Action Recognition Based on Fusion Skeleton of Two Kinect Sensors",
"doi": null,
"abstractUrl": "/proceedings-article/iccst/2020/813800a240/1p1gnv7TMI0",
"parentPublication": {
"id": "proceedings/iccst/2020/8138/0",
"title": "2020 International Conference on Culture-oriented Science & Technology (ICCST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isctt/2020/8575/0/857500a183",
"title": "Human Skeleton Graph Attention Convolutional for Video Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/isctt/2020/857500a183/1rHeLWELQru",
"parentPublication": {
"id": "proceedings/isctt/2020/8575/0",
"title": "2020 5th International Conference on Information Science, Computer Technology and Transportation (ISCTT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2021/4989/0/09455987",
"title": "Spatiotemporal-Spectral Graph Convolutional Networks For Skeleton-Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2021/09455987/1uCgvxv1gly",
"parentPublication": {
"id": "proceedings/icmew/2021/4989/0",
"title": "2021 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2021/3864/0/09428355",
"title": "Graph Convolutional Hourglass Networks for Skeleton-Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2021/09428355/1uimg7WQXVC",
"parentPublication": {
"id": "proceedings/icme/2021/3864/0",
"title": "2021 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2021/0477/0/047700c734",
"title": "JOLO-GCN: Mining Joint-Centered Light-Weight Information for Skeleton-Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2021/047700c734/1uqGtBJUFfa",
"parentPublication": {
"id": "proceedings/wacv/2021/0477/0",
"title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
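As a loose sketch of the fusion idea in the abstract above — a skeleton stream adaptively pulling in RGB appearance features through attention — the following hypothetical module uses a pooled skeleton descriptor as a query over per-region appearance features. The module name, the shapes, and the 60-class head (an NTU RGB+D-style assumption) are illustrative, not the paper's architecture:

```python
# Hedged sketch of skeleton-queried appearance attention for a
# two-stream action recognizer; all names/shapes are assumptions.
import torch
import torch.nn as nn
import torch.nn.functional as F

class AppearanceAttentionFusion(nn.Module):
    def __init__(self, skel_dim, rgb_dim, fused_dim, num_classes=60):
        super().__init__()
        self.query = nn.Linear(skel_dim, fused_dim)  # from skeleton stream
        self.key = nn.Linear(rgb_dim, fused_dim)     # from RGB region features
        self.value = nn.Linear(rgb_dim, fused_dim)
        self.classifier = nn.Linear(skel_dim + fused_dim, num_classes)

    def forward(self, skel_feat, rgb_feat):
        # skel_feat: (B, D_s) pooled skeleton descriptor
        # rgb_feat:  (B, N, D_r) appearance features for N regions/intervals
        q = self.query(skel_feat).unsqueeze(1)           # (B, 1, D_f)
        k, v = self.key(rgb_feat), self.value(rgb_feat)  # (B, N, D_f)
        # Scaled dot-product weights over the N appearance regions.
        attn = F.softmax((q * k).sum(-1) / k.shape[-1] ** 0.5, dim=-1)
        appearance = (attn.unsqueeze(-1) * v).sum(dim=1)  # (B, D_f)
        return self.classifier(torch.cat([skel_feat, appearance], dim=-1))
```

The design point the abstract emphasizes is that the skeleton stream, not a fixed pooling rule, decides which appearance regions and intervals matter for the current motion.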
{
"proceeding": {
"id": "1H1gVMlkl32",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1H1jIT4zVGE",
"doi": "10.1109/CVPR52688.2022.01955",
"title": "InfoGCN: Representation Learning for Human Skeleton-based Action Recognition",
"normalizedTitle": "InfoGCN: Representation Learning for Human Skeleton-based Action Recognition",
"abstract": "Human skeleton-based action recognition offers a valuable means to understand the intricacies of human behavior because it can handle the complex relationships between physical constraints and intention. Although several studies have focused on encoding a skeleton, less attention has been paid to embed this information into the latent representations of human action. InfoGCN proposes a learning framework for action recognition combining a novel learning objective and an encoding method. First, we design an information bottleneck-based learning objective to guide the model to learn informative but compact latent representations. To provide discriminative information for classifying action, we introduce attention-based graph convolution that captures the context-dependent intrinsic topology of human action. In addition, we present a multi-modal representation of the skeleton using the relative position of joints, designed to provide complementary spatial information for joints. InfoGcn<sup>1</sup><sup>1</sup>Code is available at github.com/stnoahl/infogcn surpasses the known state-of-the-art on multiple skeleton-based action recognition benchmarks with the accuracy of 93.0% on NTU RGB+D 60 cross-subject split, 89.8% on NTU RGB+D 120 cross-subject split, and 97.0% on NW-UCLA.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Human skeleton-based action recognition offers a valuable means to understand the intricacies of human behavior because it can handle the complex relationships between physical constraints and intention. Although several studies have focused on encoding a skeleton, less attention has been paid to embed this information into the latent representations of human action. InfoGCN proposes a learning framework for action recognition combining a novel learning objective and an encoding method. First, we design an information bottleneck-based learning objective to guide the model to learn informative but compact latent representations. To provide discriminative information for classifying action, we introduce attention-based graph convolution that captures the context-dependent intrinsic topology of human action. In addition, we present a multi-modal representation of the skeleton using the relative position of joints, designed to provide complementary spatial information for joints. InfoGcn<sup>1</sup><sup>1</sup>Code is available at github.com/stnoahl/infogcn surpasses the known state-of-the-art on multiple skeleton-based action recognition benchmarks with the accuracy of 93.0% on NTU RGB+D 60 cross-subject split, 89.8% on NTU RGB+D 120 cross-subject split, and 97.0% on NW-UCLA.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Human skeleton-based action recognition offers a valuable means to understand the intricacies of human behavior because it can handle the complex relationships between physical constraints and intention. Although several studies have focused on encoding a skeleton, less attention has been paid to embed this information into the latent representations of human action. InfoGCN proposes a learning framework for action recognition combining a novel learning objective and an encoding method. First, we design an information bottleneck-based learning objective to guide the model to learn informative but compact latent representations. To provide discriminative information for classifying action, we introduce attention-based graph convolution that captures the context-dependent intrinsic topology of human action. In addition, we present a multi-modal representation of the skeleton using the relative position of joints, designed to provide complementary spatial information for joints. InfoGcn11Code is available at github.com/stnoahl/infogcn surpasses the known state-of-the-art on multiple skeleton-based action recognition benchmarks with the accuracy of 93.0% on NTU RGB+D 60 cross-subject split, 89.8% on NTU RGB+D 120 cross-subject split, and 97.0% on NW-UCLA.",
"fno": "694600u0154",
"keywords": [
"Feature Extraction",
"Image Motion Analysis",
"Image Recognition",
"Image Representation",
"Learning Artificial Intelligence",
"Object Recognition",
"Info GCN",
"Representation Learning",
"Human Skeleton Based Action Recognition",
"Human Behavior",
"Human Action",
"Learning Framework",
"Learning Objective",
"Information Bottleneck Based",
"Informative But Compact Latent Representations",
"Attention Based Graph Convolution",
"Info Gcn",
"Multiple Skeleton Based Action Recognition Benchmarks",
"Representation Learning",
"Computer Vision",
"Convolution",
"Design Methodology",
"Benchmark Testing",
"Skeleton",
"Encoding"
],
"authors": [
{
"affiliation": "Purdue University,School of Electrical & Computer Engineering,West Lafayette,USA",
"fullName": "Hyung-Gun Chi",
"givenName": "Hyung-Gun",
"surname": "Chi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "KAIST,Daejeon,South Korea",
"fullName": "Myoung Hoon Ha",
"givenName": "Myoung Hoon",
"surname": "Ha",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Purdue University,School of Electrical & Computer Engineering,West Lafayette,USA",
"fullName": "Seunggeun Chi",
"givenName": "Seunggeun",
"surname": "Chi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "KAIST,Daejeon,South Korea",
"fullName": "Sang Wan Lee",
"givenName": "Sang Wan",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Texas at Austin,Austin,USA",
"fullName": "Qixing Huang",
"givenName": "Qixing",
"surname": "Huang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Purdue University,School of Electrical & Computer Engineering,West Lafayette,USA",
"fullName": "Karthik Ramani",
"givenName": "Karthik",
"surname": "Ramani",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-06-01T00:00:00",
"pubType": "proceedings",
"pages": "20154-20164",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6946-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1H1jIP53Igo",
"name": "pcvpr202269460-09879266s1-mm_694600u0154.zip",
"size": "17.2 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09879266s1-mm_694600u0154.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "694600u0142",
"articleId": "1H0LhH83qZa",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "694600u0165",
"articleId": "1H1ibVRJ2Le",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "trans/tp/2023/02/09729609",
"title": "Constructing Stronger and Faster Baselines for Skeleton-Based Action Recognition",
"doi": null,
"abstractUrl": "/journal/tp/2023/02/09729609/1Bya4OgVwLC",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/8.739E158",
"title": "Bootstrapped Representation Learning for Skeleton-Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/8.739E158/1G56D6Ye4eY",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859595",
"title": "Skeletal Twins: Unsupervised Skeleton-Based Action Representation Learning",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859595/1G9DMR0cmd2",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859589",
"title": "When Skeleton Meets Appearance: Adaptive Appearance Information Enhancement for Skeleton Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859589/1G9EoJsKC76",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049725",
"title": "Skeleton-based Human Action Recognition via Large-kernel Attention Graph Convolutional Network",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049725/1KYorCgCMLe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093598",
"title": "Long-Short Graph Memory Network for Skeleton-based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093598/1jPbhEzWWwU",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/06/09234715",
"title": "Learning Multi-View Interactional Skeleton Graph for Action Recognition",
"doi": null,
"abstractUrl": "/journal/tp/2023/06/09234715/1o6HiTWfQkg",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isctt/2020/8575/0/857500a183",
"title": "Human Skeleton Graph Attention Convolutional for Video Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/isctt/2020/857500a183/1rHeLWELQru",
"parentPublication": {
"id": "proceedings/isctt/2020/8575/0",
"title": "2020 5th International Conference on Information Science, Computer Technology and Transportation (ISCTT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2021/3864/0/09428459",
"title": "Hierarchical Transformer: Unsupervised Representation Learning for Skeleton-Based Human Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2021/09428459/1uilZj80gso",
"parentPublication": {
"id": "proceedings/icme/2021/3864/0",
"title": "2021 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2021/3864/0/09428355",
"title": "Graph Convolutional Hourglass Networks for Skeleton-Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2021/09428355/1uimg7WQXVC",
"parentPublication": {
"id": "proceedings/icme/2021/3864/0",
"title": "2021 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
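The InfoGCN abstract above combines a classification objective with an information-bottleneck penalty on the latent representation. A minimal sketch of what such an objective can look like, assuming a Gaussian latent and a standard-normal prior (this parameterization and the beta weight are assumptions, not the paper's exact formulation):

```python
# Minimal information-bottleneck-style training objective:
# cross-entropy on the prediction plus a KL penalty that keeps
# the latent compact. Parameterization is an illustrative assumption.
import torch
import torch.nn.functional as F

def ib_loss(mu, logvar, logits, labels, beta=1e-4):
    # mu, logvar: (B, D) parameters of the latent q(z|x); logits: (B, K)
    ce = F.cross_entropy(logits, labels)
    # KL( N(mu, diag(exp(logvar))) || N(0, I) ) in closed form, batch mean.
    kl = 0.5 * (mu.pow(2) + logvar.exp() - 1.0 - logvar).sum(dim=1).mean()
    return ce + beta * kl

# During training one would sample z = mu + logvar.mul(0.5).exp() * eps
# (the usual reparameterization trick) and feed z to the classifier head.
```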
{
"proceeding": {
"id": "1HbbualfG7e",
"title": "2022 International Symposium on Control Engineering and Robotics (ISCER)",
"acronym": "iscer",
"groupId": "1847824",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1HbbBGVP8mk",
"doi": "10.1109/ISCER55570.2022.00042",
"title": "Inception Spatial Temporal Graph Convolutional Networks for Skeleton-Based Action Recognition",
"normalizedTitle": "Inception Spatial Temporal Graph Convolutional Networks for Skeleton-Based Action Recognition",
"abstract": "Graph convolutional networks is widely used in the field of skeleton-based motion recognition because of its characteristics of applying to non-Euclidean data. But most of the existing methods based on graph convolutional networks only perform convolution between adjacent joint points, ignoring the connection with farther joint points and symmetrical points. In order to improve the accuracy of skeleton-based action recognition, we propose a novel Inception spatial temporal graph convolutional networks (IST-GCN) for skeleton-based action recognition. By introducing the symmetry characteristics of the skeleton, our model can extract the interactive features of the symmetrical part of the human body. We also use the idea of multi-scale convolution to improve graph convolutional networks and temporal convolutional networks based on the Inception structure to better extract spatial and temporal features. A large number of experiments on NTU-RGB+D dataset show that our models have achieved higher accuracy and are more suitable for application in skeleton-based motion recognition. Codes are available at https://github.com/julycrow/IST-GCN.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Graph convolutional networks is widely used in the field of skeleton-based motion recognition because of its characteristics of applying to non-Euclidean data. But most of the existing methods based on graph convolutional networks only perform convolution between adjacent joint points, ignoring the connection with farther joint points and symmetrical points. In order to improve the accuracy of skeleton-based action recognition, we propose a novel Inception spatial temporal graph convolutional networks (IST-GCN) for skeleton-based action recognition. By introducing the symmetry characteristics of the skeleton, our model can extract the interactive features of the symmetrical part of the human body. We also use the idea of multi-scale convolution to improve graph convolutional networks and temporal convolutional networks based on the Inception structure to better extract spatial and temporal features. A large number of experiments on NTU-RGB+D dataset show that our models have achieved higher accuracy and are more suitable for application in skeleton-based motion recognition. Codes are available at https://github.com/julycrow/IST-GCN.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Graph convolutional networks is widely used in the field of skeleton-based motion recognition because of its characteristics of applying to non-Euclidean data. But most of the existing methods based on graph convolutional networks only perform convolution between adjacent joint points, ignoring the connection with farther joint points and symmetrical points. In order to improve the accuracy of skeleton-based action recognition, we propose a novel Inception spatial temporal graph convolutional networks (IST-GCN) for skeleton-based action recognition. By introducing the symmetry characteristics of the skeleton, our model can extract the interactive features of the symmetrical part of the human body. We also use the idea of multi-scale convolution to improve graph convolutional networks and temporal convolutional networks based on the Inception structure to better extract spatial and temporal features. A large number of experiments on NTU-RGB+D dataset show that our models have achieved higher accuracy and are more suitable for application in skeleton-based motion recognition. Codes are available at https://github.com/julycrow/IST-GCN.",
"fno": "847800a208",
"keywords": [
"Convolutional Neural Nets",
"Feature Extraction",
"Graph Theory",
"Image Motion Analysis",
"Inception Spatial Temporal Graph Convolutional Networks",
"Skeleton Based Action Recognition",
"Skeleton Based Motion Recognition",
"Adjacent Joint Points",
"Farther Joint Points",
"Multiscale Convolution",
"Temporal Convolutional Networks",
"Spatial Features",
"Temporal Features",
"Convolutional Codes",
"Control Engineering",
"Convolution",
"Biological System Modeling",
"Feature Extraction",
"Skeleton",
"Data Mining",
"Action Recognition",
"Graph Convolution Network",
"Multi Scale"
],
"authors": [
{
"affiliation": "Anhui University, Hefei Institutes of Physical Science, Chinese Academy of Sciences,Hefei,China",
"fullName": "Mingkun Jiang",
"givenName": "Mingkun",
"surname": "Jiang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hefei Institutes of Physical Science, Chinese Academy of Sciences,Hefei,China",
"fullName": "Jun Dong",
"givenName": "Jun",
"surname": "Dong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hefei Institutes of Physical Science, Chinese Academy of Sciences,Hefei,China",
"fullName": "Dong Ma",
"givenName": "Dong",
"surname": "Ma",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Anhui University, Hefei Institutes of Physical Science, Chinese Academy of Sciences,Hefei,China",
"fullName": "Jianjing Sun",
"givenName": "Jianjing",
"surname": "Sun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hefei Institutes of Physical Science, Chinese Academy of Sciences,Hefei,China",
"fullName": "Junming He",
"givenName": "Junming",
"surname": "He",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Wuhu Institute of Technology,Wuhu,China",
"fullName": "Luhong Lang",
"givenName": "Luhong",
"surname": "Lang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iscer",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-02-01T00:00:00",
"pubType": "proceedings",
"pages": "208-213",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8478-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "847800a202",
"articleId": "1Hbbv1fJQmQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "847800a214",
"articleId": "1HbbAm1x9S0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cecit/2021/3757/0/375700b150",
"title": "Spatial-Temporal Graph Convolutional Networks for Action Recognition with Adjacency Matrix Generation Network",
"doi": null,
"abstractUrl": "/proceedings-article/cecit/2021/375700b150/1CdEBDZSXPW",
"parentPublication": {
"id": "proceedings/cecit/2021/3757/0",
"title": "2021 2nd International Conference on Electronics, Communications and Information Technology (CECIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859752",
"title": "GLTA-GCN: Global-Local Temporal Attention Graph Convolutional Network for Unsupervised Skeleton-Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859752/1G9DAPwJBzW",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859787",
"title": "MKE-GCN: Multi-Modal Knowledge Embedded Graph Convolutional Network for Skeleton-Based Action Recognition in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859787/1G9DHYOMoSI",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/5555/01/09998567",
"title": "Skeleton-Based Action Segmentation with Multi-Stage Spatial-Temporal Graph Convolutional Neural Networks",
"doi": null,
"abstractUrl": "/journal/ec/5555/01/09998567/1JlF7XHA1Hy",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049725",
"title": "Skeleton-based Human Action Recognition via Large-kernel Attention Graph Convolutional Network",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049725/1KYorCgCMLe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800a180",
"title": "Skeleton-Based Action Recognition With Shift Graph Convolutional Network",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800a180/1m3nFwgro2Y",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800o4321",
"title": "Context Aware Graph Convolution for Skeleton-Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800o4321/1m3o0gFXGaQ",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isctt/2020/8575/0/857500a183",
"title": "Human Skeleton Graph Attention Convolutional for Video Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/isctt/2020/857500a183/1rHeLWELQru",
"parentPublication": {
"id": "proceedings/isctt/2020/8575/0",
"title": "2020 5th International Conference on Information Science, Computer Technology and Transportation (ISCTT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2021/4989/0/09455987",
"title": "Spatiotemporal-Spectral Graph Convolutional Networks For Skeleton-Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2021/09455987/1uCgvxv1gly",
"parentPublication": {
"id": "proceedings/icmew/2021/4989/0",
"title": "2021 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2021/3864/0/09428355",
"title": "Graph Convolutional Hourglass Networks for Skeleton-Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2021/09428355/1uimg7WQXVC",
"parentPublication": {
"id": "proceedings/icme/2021/3864/0",
"title": "2021 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
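The IST-GCN abstract above applies the Inception idea of multi-scale convolution to the temporal dimension of skeleton features. A minimal sketch of such a multi-scale temporal block, with branch kernel sizes chosen purely for illustration:

```python
# Sketch of an Inception-style multi-scale temporal convolution over
# skeleton features; branch widths/kernels are illustrative assumptions.
import torch
import torch.nn as nn

class InceptionTemporalConv(nn.Module):
    def __init__(self, channels):
        super().__init__()
        # Parallel temporal branches with different receptive fields,
        # each padded so the frame count is preserved.
        # Assumes `channels` is divisible by 4.
        self.branches = nn.ModuleList([
            nn.Conv2d(channels, channels // 4, kernel_size=(k, 1),
                      padding=(k // 2, 0))
            for k in (1, 3, 5, 7)
        ])

    def forward(self, x):
        # x: (batch, channels, frames, joints); concatenating the branch
        # outputs restores the original channel count.
        return torch.cat([b(x) for b in self.branches], dim=1)

# x = torch.randn(4, 64, 32, 25)
# y = InceptionTemporalConv(64)(x)  # -> (4, 64, 32, 25)
```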
{
"proceeding": {
"id": "1IHotVZum6Q",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"acronym": "icpr",
"groupId": "9956007",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1IHpyw9hnQ4",
"doi": "10.1109/ICPR56361.2022.9956108",
"title": "A Graph Convolutional Network with Early Attention Module for Skeleton-based Action Prediction",
"normalizedTitle": "A Graph Convolutional Network with Early Attention Module for Skeleton-based Action Prediction",
"abstract": "This paper addresses the problem of skeleton-based action prediction, which aims to predict the action label when the skeleton sequence is partially observed. The action prediction task is more challenging compared to the after-the-fact action recognition since it needs to make decisions according to the beginning part of action executions. The existing methods improve the action prediction performance by taking advantage of the global action knowledge in full sequences, and some of them require the correspondence between a partial sequence and its associated full sequence. In this paper, we step towards a new direction by exploiting the discriminative information in early observations of actions as much as possible. We propose a Graph Convolutional Network with Early Attention Module (GCN-EAM), which employs a series of spatial-temporal graph convolution blocks to extract features from skeletons. In order to infer the action category as fast as possible, we introduce an early attention module to adaptively emphasize discriminative observations at the beginning stage of actions. The proposed method is evaluated on the large-scale NTU-RGB+D dataset and achieves excellent performance for action prediction.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper addresses the problem of skeleton-based action prediction, which aims to predict the action label when the skeleton sequence is partially observed. The action prediction task is more challenging compared to the after-the-fact action recognition since it needs to make decisions according to the beginning part of action executions. The existing methods improve the action prediction performance by taking advantage of the global action knowledge in full sequences, and some of them require the correspondence between a partial sequence and its associated full sequence. In this paper, we step towards a new direction by exploiting the discriminative information in early observations of actions as much as possible. We propose a Graph Convolutional Network with Early Attention Module (GCN-EAM), which employs a series of spatial-temporal graph convolution blocks to extract features from skeletons. In order to infer the action category as fast as possible, we introduce an early attention module to adaptively emphasize discriminative observations at the beginning stage of actions. The proposed method is evaluated on the large-scale NTU-RGB+D dataset and achieves excellent performance for action prediction.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper addresses the problem of skeleton-based action prediction, which aims to predict the action label when the skeleton sequence is partially observed. The action prediction task is more challenging compared to the after-the-fact action recognition since it needs to make decisions according to the beginning part of action executions. The existing methods improve the action prediction performance by taking advantage of the global action knowledge in full sequences, and some of them require the correspondence between a partial sequence and its associated full sequence. In this paper, we step towards a new direction by exploiting the discriminative information in early observations of actions as much as possible. We propose a Graph Convolutional Network with Early Attention Module (GCN-EAM), which employs a series of spatial-temporal graph convolution blocks to extract features from skeletons. In order to infer the action category as fast as possible, we introduce an early attention module to adaptively emphasize discriminative observations at the beginning stage of actions. The proposed method is evaluated on the large-scale NTU-RGB+D dataset and achieves excellent performance for action prediction.",
"fno": "09956108",
"keywords": [
"Convolutional Neural Nets",
"Feature Extraction",
"Graph Theory",
"Image Motion Analysis",
"Image Sequences",
"Action Category",
"Action Executions",
"Action Label",
"Action Prediction Performance",
"Action Prediction Task",
"After The Fact Action Recognition",
"Early Attention Module",
"GCN EAM",
"Global Action Knowledge",
"Graph Convolutional Network",
"Partial Sequence",
"Skeleton Sequence",
"Skeleton Based Action Prediction",
"Spatial Temporal Graph Convolution Blocks",
"Convolution",
"Feature Extraction",
"Skeleton",
"Character Recognition",
"Task Analysis"
],
"authors": [
{
"affiliation": "Shenyang Aerospace University,School of Computer Science,Shenyang,China",
"fullName": "Cuiwei Liu",
"givenName": "Cuiwei",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shenyang Aerospace University,School of Computer Science,Shenyang,China",
"fullName": "Xiaoxue Zhao",
"givenName": "Xiaoxue",
"surname": "Zhao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shenyang Aerospace University,School of Artificial Intelligence,Shenyang,China",
"fullName": "Zhuo Yan",
"givenName": "Zhuo",
"surname": "Yan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shenyang Aerospace University,School of Computer Science,Shenyang,China",
"fullName": "Youzhi Jiang",
"givenName": "Youzhi",
"surname": "Jiang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shenyang Aerospace University,School of Computer Science,Shenyang,China",
"fullName": "Xiangbin Shi",
"givenName": "Xiangbin",
"surname": "Shi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-08-01T00:00:00",
"pubType": "proceedings",
"pages": "1266-1272",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-9062-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09956679",
"articleId": "1IHqdedRb4Q",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09956701",
"articleId": "1IHpdhvLkYg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "trans/tp/2020/06/08640046",
"title": "Skeleton-Based Online Action Prediction Using Scale Selection Network",
"doi": null,
"abstractUrl": "/journal/tp/2020/06/08640046/17D45WrVg7h",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08546012",
"title": "Action Recognition with Visual Attention on Skeleton Images",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08546012/17D45WwsQ53",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859589",
"title": "When Skeleton Meets Appearance: Adaptive Appearance Information Enhancement for Skeleton Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859589/1G9EoJsKC76",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600c959",
"title": "Revisiting Skeleton-based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600c959/1H1m03S0tDq",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956662",
"title": "Temporal Shift and Attention Modules for Graphical Skeleton Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956662/1IHpKe3Q9Fu",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049725",
"title": "Skeleton-based Human Action Recognition via Large-kernel Attention Graph Convolutional Network",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049725/1KYorCgCMLe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300b227",
"title": "An Attention Enhanced Graph Convolutional LSTM Network for Skeleton-Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300b227/1gyrM0FYz5e",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isctt/2020/8575/0/857500a183",
"title": "Human Skeleton Graph Attention Convolutional for Video Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/isctt/2020/857500a183/1rHeLWELQru",
"parentPublication": {
"id": "proceedings/isctt/2020/8575/0",
"title": "2020 5th International Conference on Information Science, Computer Technology and Transportation (ISCTT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412113",
"title": "Temporal Extension Module for Skeleton-Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412113/1tminzkoTCg",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2021/0477/0/047700c734",
"title": "JOLO-GCN: Mining Joint-Centered Light-Weight Information for Skeleton-Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2021/047700c734/1uqGtBJUFfa",
"parentPublication": {
"id": "proceedings/wacv/2021/0477/0",
"title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
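The GCN-EAM abstract above hinges on an early attention module that re-weights the observed prefix of a sequence toward its most discriminative frames. A hypothetical sketch of one such temporal attention (the scoring network is an assumption, not the paper's module):

```python
# Hedged sketch of "early attention" over the observed prefix of a
# skeleton sequence: learned per-frame weights pool the features so
# that discriminative early observations dominate the descriptor.
import torch
import torch.nn as nn
import torch.nn.functional as F

class EarlyTemporalAttention(nn.Module):
    def __init__(self, channels):
        super().__init__()
        self.score = nn.Linear(channels, 1)  # per-frame importance score

    def forward(self, x):
        # x: (batch, frames, channels) features of the observed prefix only
        weights = F.softmax(self.score(x).squeeze(-1), dim=1)  # (B, T)
        return (weights.unsqueeze(-1) * x).sum(dim=1)          # (B, C)

# feats = torch.randn(8, 20, 256)   # first 20 observed frames
# desc = EarlyTemporalAttention(256)(feats)  # -> (8, 256)
```

Because the pooled descriptor is available at every observation ratio, the same classifier head can be queried as frames arrive, which is what makes early prediction possible.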
{
"proceeding": {
"id": "1a3x4M4IIJa",
"title": "2018 International Conference on Virtual Reality and Visualization (ICVRV)",
"acronym": "icvrv",
"groupId": "1800579",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "1a3x5zJo5uU",
"doi": "10.1109/ICVRV.2018.00022",
"title": "Skeleton Capsule Net: An Efficient Network for Action Recognition",
"normalizedTitle": "Skeleton Capsule Net: An Efficient Network for Action Recognition",
"abstract": "Capsule network is a new type of deep learning method to improve the CNN module. Though it has performed quite well on classifying the MNIST dataset, there are few applications in other fields. Thus in this paper, we apply the capsule network on skeleton-based classification and propose a framework to explore the potential of it. Since the bottom layer of the capsule network is still based on convolution operation, we feed heatmap as well as raw skeleton data and reach good performance on convolution-based action recognition. Most researches take spatial and temporal features into consideration and they do help to recognition accuracy. We propose two different encapsulations to extract the spatial and temporal features of skeleton sequences. We perform our experiments on UT-Kinect and a portion of NTU RGB+D dataset, and we achieve best 87% accuracy on the NTU RGB+D dataset. We also find that the capsule network is suitable for the coarse-grained classification tasks. In a conclusion, not only the characteristics of capsule network are proved, but also an efficient method to recognize human action is realized.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Capsule network is a new type of deep learning method to improve the CNN module. Though it has performed quite well on classifying the MNIST dataset, there are few applications in other fields. Thus in this paper, we apply the capsule network on skeleton-based classification and propose a framework to explore the potential of it. Since the bottom layer of the capsule network is still based on convolution operation, we feed heatmap as well as raw skeleton data and reach good performance on convolution-based action recognition. Most researches take spatial and temporal features into consideration and they do help to recognition accuracy. We propose two different encapsulations to extract the spatial and temporal features of skeleton sequences. We perform our experiments on UT-Kinect and a portion of NTU RGB+D dataset, and we achieve best 87% accuracy on the NTU RGB+D dataset. We also find that the capsule network is suitable for the coarse-grained classification tasks. In a conclusion, not only the characteristics of capsule network are proved, but also an efficient method to recognize human action is realized.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Capsule network is a new type of deep learning method to improve the CNN module. Though it has performed quite well on classifying the MNIST dataset, there are few applications in other fields. Thus in this paper, we apply the capsule network on skeleton-based classification and propose a framework to explore the potential of it. Since the bottom layer of the capsule network is still based on convolution operation, we feed heatmap as well as raw skeleton data and reach good performance on convolution-based action recognition. Most researches take spatial and temporal features into consideration and they do help to recognition accuracy. We propose two different encapsulations to extract the spatial and temporal features of skeleton sequences. We perform our experiments on UT-Kinect and a portion of NTU RGB+D dataset, and we achieve best 87% accuracy on the NTU RGB+D dataset. We also find that the capsule network is suitable for the coarse-grained classification tasks. In a conclusion, not only the characteristics of capsule network are proved, but also an efficient method to recognize human action is realized.",
"fno": "849700a074",
"keywords": [
"Convolution",
"Feature Extraction",
"Image Classification",
"Image Motion Analysis",
"Image Representation",
"Image Sequences",
"Learning Artificial Intelligence",
"Skeleton Capsule Net",
"Capsule Network",
"Skeleton Based Classification",
"Raw Skeleton Data",
"Convolution Based Action Recognition",
"Spatial Features",
"Temporal Features",
"NTU RGB D Dataset",
"Heatmap",
"Skeleton Sequences",
"UT Kinect",
"Coarse Grained Classification Tasks",
"Skeleton",
"Convolution",
"Encapsulation",
"Heuristic Algorithms",
"Feature Extraction",
"Routing",
"Three Dimensional Displays",
"Skeleton",
"Capsule Network",
"Action Recognition",
"Motion Detection"
],
"authors": [
{
"affiliation": null,
"fullName": "Yue Yu",
"givenName": "Yue",
"surname": "Yu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Niehao Tian",
"givenName": "Niehao",
"surname": "Tian",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xiangru Chen",
"givenName": "Xiangru",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ying Li",
"givenName": "Ying",
"surname": "Li",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icvrv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-10-01T00:00:00",
"pubType": "proceedings",
"pages": "74-77",
"year": "2018",
"issn": "2375-141X",
"isbn": "978-1-5386-8497-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "849700a070",
"articleId": "1a3x84OTSIo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "849700a078",
"articleId": "1a3x884r54c",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmew/2017/0560/0/08026285",
"title": "Skeleton-based action recognition with convolutional neural networks",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2017/08026285/12OmNxFaLl9",
"parentPublication": {
"id": "proceedings/icmew/2017/0560/0",
"title": "2017 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2021/3734/0/373400a087",
"title": "A two-stream heterogeneous network for action recognition based on skeleton and RGB modalities",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2021/373400a087/1A3j4pziBTq",
"parentPublication": {
"id": "proceedings/ism/2021/3734/0",
"title": "2021 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/02/09729609",
"title": "Constructing Stronger and Faster Baselines for Skeleton-Based Action Recognition",
"doi": null,
"abstractUrl": "/journal/tp/2023/02/09729609/1Bya4OgVwLC",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859589",
"title": "When Skeleton Meets Appearance: Adaptive Appearance Information Enhancement for Skeleton Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859589/1G9EoJsKC76",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600u0154",
"title": "InfoGCN: Representation Learning for Human Skeleton-based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600u0154/1H1jIT4zVGE",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049725",
"title": "Skeleton-based Human Action Recognition via Large-kernel Attention Graph Convolutional Network",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049725/1KYorCgCMLe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/06/09234715",
"title": "Learning Multi-View Interactional Skeleton Graph for Action Recognition",
"doi": null,
"abstractUrl": "/journal/tp/2023/06/09234715/1o6HiTWfQkg",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isctt/2020/8575/0/857500a183",
"title": "Human Skeleton Graph Attention Convolutional for Video Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/isctt/2020/857500a183/1rHeLWELQru",
"parentPublication": {
"id": "proceedings/isctt/2020/8575/0",
"title": "2020 5th International Conference on Information Science, Computer Technology and Transportation (ISCTT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2021/3864/0/09428355",
"title": "Graph Convolutional Hourglass Networks for Skeleton-Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2021/09428355/1uimg7WQXVC",
"parentPublication": {
"id": "proceedings/icme/2021/3864/0",
"title": "2021 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2021/0477/0/047700c734",
"title": "JOLO-GCN: Mining Joint-Centered Light-Weight Information for Skeleton-Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2021/047700c734/1uqGtBJUFfa",
"parentPublication": {
"id": "proceedings/wacv/2021/0477/0",
"title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1rHeKX6WcSc",
"title": "2020 5th International Conference on Information Science, Computer Technology and Transportation (ISCTT)",
"acronym": "isctt",
"groupId": "1840584",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1rHeLWELQru",
"doi": "10.1109/ISCTT51595.2020.00040",
"title": "Human Skeleton Graph Attention Convolutional for Video Action Recognition",
"normalizedTitle": "Human Skeleton Graph Attention Convolutional for Video Action Recognition",
"abstract": "Action recognition based on human skeleton information is a hot topic in the field of computer vision, how to represent the human skeleton graph structure is the key of the method. Graph convolutional network is widely used to extract spatial features of human skeleton. However, the graph convolutional network shares the same weight for neighborhood of each node. In this paper, we propose Human Skeleton Graph Attention Convolutional Neural Network, which introduces graph attention convolution mechanism to extract the spatial features of human skeleton. The model improves the spatial feature extraction of skeleton graph based on the feature relationship of node neighborhood. The experimental results on Kinetics and NTU-RGB+D datasets show that the model can obtain better representation of spatial features, and can achieve better accuracy.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Action recognition based on human skeleton information is a hot topic in the field of computer vision, how to represent the human skeleton graph structure is the key of the method. Graph convolutional network is widely used to extract spatial features of human skeleton. However, the graph convolutional network shares the same weight for neighborhood of each node. In this paper, we propose Human Skeleton Graph Attention Convolutional Neural Network, which introduces graph attention convolution mechanism to extract the spatial features of human skeleton. The model improves the spatial feature extraction of skeleton graph based on the feature relationship of node neighborhood. The experimental results on Kinetics and NTU-RGB+D datasets show that the model can obtain better representation of spatial features, and can achieve better accuracy.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Action recognition based on human skeleton information is a hot topic in the field of computer vision, how to represent the human skeleton graph structure is the key of the method. Graph convolutional network is widely used to extract spatial features of human skeleton. However, the graph convolutional network shares the same weight for neighborhood of each node. In this paper, we propose Human Skeleton Graph Attention Convolutional Neural Network, which introduces graph attention convolution mechanism to extract the spatial features of human skeleton. The model improves the spatial feature extraction of skeleton graph based on the feature relationship of node neighborhood. The experimental results on Kinetics and NTU-RGB+D datasets show that the model can obtain better representation of spatial features, and can achieve better accuracy.",
"fno": "857500a183",
"keywords": [
"Computer Vision",
"Feature Extraction",
"Graph Theory",
"Image Motion Analysis",
"Learning Artificial Intelligence",
"Neural Nets",
"Object Recognition",
"Video Signal Processing",
"Video Action Recognition",
"Human Skeleton Information",
"Human Skeleton Graph Structure",
"Spatial Features",
"Graph Convolutional Network Shares",
"Human Skeleton Graph Attention Convolutional Neural Network",
"Graph Attention Convolution Mechanism",
"Spatial Feature Extraction",
"Convolution",
"Pose Estimation",
"Transportation",
"Feature Extraction",
"Skeleton",
"Kinetic Theory",
"Task Analysis",
"Action Recognition",
"Computer Vision",
"Human Skeleton",
"Graph Convolutional Network",
"Attention"
],
"authors": [
{
"affiliation": "Artificial Intelligence College, Shenyang Aerospace University Shenyang,China",
"fullName": "Deyuan Zhang",
"givenName": "Deyuan",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Computer, Shenyang Aerospace University Shenyang,China",
"fullName": "Hongwei Gao",
"givenName": "Hongwei",
"surname": "Gao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Liaoning Planning and Designing Institute of Post and Telecommunication Company Shenyang,China",
"fullName": "Hailong Dai",
"givenName": "Hailong",
"surname": "Dai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Computer, Shenyang Aerospace University Shenyang,China",
"fullName": "Xiangbin Shi",
"givenName": "Xiangbin",
"surname": "Shi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "isctt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "183-187",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-8575-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "857500a179",
"articleId": "1rHeM6z1NV6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "857500a188",
"articleId": "1rHeRMAKGlO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wacv/2019/1975/0/197500a061",
"title": "Skeleton-Based Action Recognition of People Handling Objects",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2019/197500a061/18j8Iti1sBy",
"parentPublication": {
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigdatase/2021/0038/0/003800a023",
"title": "Two-stream Graph Attention Convolutional for Video Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/bigdatase/2021/003800a023/1BzUyAWp44w",
"parentPublication": {
"id": "proceedings/bigdatase/2021/0038/0",
"title": "2021 IEEE 15th International Conference on Big Data Science and Engineering (BigDataSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscer/2022/8478/0/847800a208",
"title": "Inception Spatial Temporal Graph Convolutional Networks for Skeleton-Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/iscer/2022/847800a208/1HbbBGVP8mk",
"parentPublication": {
"id": "proceedings/iscer/2022/8478/0",
"title": "2022 International Symposium on Control Engineering and Robotics (ISCER)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956662",
"title": "Temporal Shift and Attention Modules for Graphical Skeleton Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956662/1IHpKe3Q9Fu",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956672",
"title": "Skeletal Human Action Recognition using Hybrid Attention based Graph Convolutional Network",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956672/1IHpKxlIcFi",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049725",
"title": "Skeleton-based Human Action Recognition via Large-kernel Attention Graph Convolutional Network",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049725/1KYorCgCMLe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300b227",
"title": "An Attention Enhanced Graph Convolutional LSTM Network for Skeleton-Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300b227/1gyrM0FYz5e",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800a180",
"title": "Skeleton-Based Action Recognition With Shift Graph Convolutional Network",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800a180/1m3nFwgro2Y",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2021/4989/0/09455987",
"title": "Spatiotemporal-Spectral Graph Convolutional Networks For Skeleton-Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2021/09455987/1uCgvxv1gly",
"parentPublication": {
"id": "proceedings/icmew/2021/4989/0",
"title": "2021 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2021/3864/0/09428355",
"title": "Graph Convolutional Hourglass Networks for Skeleton-Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2021/09428355/1uimg7WQXVC",
"parentPublication": {
"id": "proceedings/icme/2021/3864/0",
"title": "2021 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
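To make the graph-attention idea in the ISCTT record above concrete: a plain graph convolution aggregates each joint's neighborhood with shared weights, while attention scores every neighbor individually before aggregating. Below is a minimal NumPy sketch of that pattern; the joint count, layer sizes, and additive scoring rule are illustrative assumptions, not the paper's exact formulation.

```python
# Hedged sketch: attention-weighted neighborhood aggregation on a skeleton graph.
import numpy as np

def graph_attention_layer(X, A, W, a_src, a_dst):
    """X: (N, F) joint features; A: (N, N) adjacency with self-loops;
    W: (F, Fp) shared linear map; a_src/a_dst: (Fp,) attention parameters.
    Unlike a plain GCN, each neighbor j of joint i gets its own weight alpha_ij."""
    H = X @ W                                             # transformed features, (N, Fp)
    scores = H @ a_src[:, None] + (H @ a_dst[:, None]).T  # additive pairwise logits, (N, N)
    scores = np.where(A > 0, scores, -np.inf)             # attend only along skeleton edges
    e = np.exp(scores - scores.max(axis=1, keepdims=True))
    alpha = e / e.sum(axis=1, keepdims=True)              # row-wise softmax over neighbors
    return np.maximum(alpha @ H, 0.0)                     # aggregate, then ReLU

N, F, Fp = 25, 3, 16                   # e.g. 25 joints with (x, y, z) coordinates
A = np.eye(N)
for i, j in [(0, 1), (1, 2), (2, 3)]:  # a few stand-in bone edges
    A[i, j] = A[j, i] = 1.0
rng = np.random.default_rng(0)
out = graph_attention_layer(rng.normal(size=(N, F)), A,
                            rng.normal(size=(F, Fp)),
                            rng.normal(size=Fp), rng.normal(size=Fp))
print(out.shape)                       # (25, 16)
```

The -inf mask is what keeps the learned weights tied to the skeleton topology; removing it would turn the layer into full self-attention over all joints.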
{
"proceeding": {
"id": "1uCglwCTpMk",
"title": "2021 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"acronym": "icmew",
"groupId": "1801805",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1uCgvxv1gly",
"doi": "10.1109/ICMEW53276.2021.9455987",
"title": "Spatiotemporal-Spectral Graph Convolutional Networks For Skeleton-Based Action Recognition",
"normalizedTitle": "Spatiotemporal-Spectral Graph Convolutional Networks For Skeleton-Based Action Recognition",
"abstract": "Skeleton-based action recognition has been a hot topic with the increasing development of Graph Convolutional Networks(GCNs). Previous work constructed the skeleton sequences into graphs and focused on extracting spatial-temporal information from various actions. However, they ignored the hidden information in the spectral domain of the whole graphs. In this paper, a novel graph network based on both spatio-temporal information and spectral-domain information is proposed(SS-GCN), adopting a two-stream graph topology and can be trained in an end-to-end manner. Besides, along with other GCN methods that optimize only the spatial-temporal graph, our spectral stream helps in further performance improvements. Our method(SS-GCN) is evaluated on two large skeleton-based datasets, NTU-RGBD and Kinetics-Skeleton. The experiment results demonstrate the effectiveness of SS-GCN.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Skeleton-based action recognition has been a hot topic with the increasing development of Graph Convolutional Networks(GCNs). Previous work constructed the skeleton sequences into graphs and focused on extracting spatial-temporal information from various actions. However, they ignored the hidden information in the spectral domain of the whole graphs. In this paper, a novel graph network based on both spatio-temporal information and spectral-domain information is proposed(SS-GCN), adopting a two-stream graph topology and can be trained in an end-to-end manner. Besides, along with other GCN methods that optimize only the spatial-temporal graph, our spectral stream helps in further performance improvements. Our method(SS-GCN) is evaluated on two large skeleton-based datasets, NTU-RGBD and Kinetics-Skeleton. The experiment results demonstrate the effectiveness of SS-GCN.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Skeleton-based action recognition has been a hot topic with the increasing development of Graph Convolutional Networks(GCNs). Previous work constructed the skeleton sequences into graphs and focused on extracting spatial-temporal information from various actions. However, they ignored the hidden information in the spectral domain of the whole graphs. In this paper, a novel graph network based on both spatio-temporal information and spectral-domain information is proposed(SS-GCN), adopting a two-stream graph topology and can be trained in an end-to-end manner. Besides, along with other GCN methods that optimize only the spatial-temporal graph, our spectral stream helps in further performance improvements. Our method(SS-GCN) is evaluated on two large skeleton-based datasets, NTU-RGBD and Kinetics-Skeleton. The experiment results demonstrate the effectiveness of SS-GCN.",
"fno": "09455987",
"keywords": [
"Feature Extraction",
"Graph Theory",
"Image Motion Analysis",
"Image Recognition",
"Image Representation",
"Image Sequences",
"Learning Artificial Intelligence",
"Spatiotemporal Phenomena",
"Skeleton Based Action Recognition",
"Skeleton Sequences",
"Extracting Spatial Temporal Information",
"Hidden Information",
"Spectral Domain",
"Novel Graph Network",
"Spatio Temporal Information",
"Spectral Domain Information",
"SS GCN",
"Two Stream Graph Topology",
"Spatial Temporal Graph",
"Spectral Stream",
"Skeleton Based Datasets",
"Kinetics Skeleton",
"Spatiotemporal Spectral Graph Convolutional Networks",
"Network Topology",
"Convolution",
"Conferences",
"Skeleton",
"Spatiotemporal Phenomena",
"Topology",
"Data Mining",
"Action Recognition",
"Graph Convolution Networks",
"Spatiotemporal",
"Spectral",
"End To End"
],
"authors": [
{
"affiliation": "Shanghai Jiao Tong University,Shanghai,China",
"fullName": "Shuo Chen",
"givenName": "Shuo",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shanghai Jiao Tong University,Shanghai,China",
"fullName": "Ke Xu",
"givenName": "Ke",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shanghai Jiao Tong University,Shanghai,China",
"fullName": "Xinghao Jiang",
"givenName": "Xinghao",
"surname": "Jiang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shanghai Jiao Tong University,Shanghai,China",
"fullName": "Tanfeng Sun",
"givenName": "Tanfeng",
"surname": "Sun",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmew",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4989-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09455955",
"articleId": "1uCgtEwitR6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09455946",
"articleId": "1uCgqtGCaDm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2022/8563/0/09859752",
"title": "GLTA-GCN: Global-Local Temporal Attention Graph Convolutional Network for Unsupervised Skeleton-Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859752/1G9DAPwJBzW",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859781",
"title": "Hierarchical Graph Convolutional Skeleton Transformer for Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859781/1G9DN3HTea4",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscer/2022/8478/0/847800a208",
"title": "Inception Spatial Temporal Graph Convolutional Networks for Skeleton-Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/iscer/2022/847800a208/1HbbBGVP8mk",
"parentPublication": {
"id": "proceedings/iscer/2022/8478/0",
"title": "2022 International Symposium on Control Engineering and Robotics (ISCER)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049725",
"title": "Skeleton-based Human Action Recognition via Large-kernel Attention Graph Convolutional Network",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049725/1KYorCgCMLe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2019/5023/0/502300b740",
"title": "Spatial Residual Layer and Dense Connection Block Enhanced Spatial Temporal Graph Convolutional Network for Skeleton-Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2019/502300b740/1i5mmkODZxC",
"parentPublication": {
"id": "proceedings/iccvw/2019/5023/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800a180",
"title": "Skeleton-Based Action Recognition With Shift Graph Convolutional Network",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800a180/1m3nFwgro2Y",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800o4321",
"title": "Context Aware Graph Convolution for Skeleton-Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800o4321/1m3o0gFXGaQ",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/06/09234715",
"title": "Learning Multi-View Interactional Skeleton Graph for Action Recognition",
"doi": null,
"abstractUrl": "/journal/tp/2023/06/09234715/1o6HiTWfQkg",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412366",
"title": "Recurrent Graph Convolutional Networks for Skeleton-based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412366/1tmjzTMkNTG",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2021/3864/0/09428355",
"title": "Graph Convolutional Hourglass Networks for Skeleton-Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2021/09428355/1uimg7WQXVC",
"parentPublication": {
"id": "proceedings/icme/2021/3864/0",
"title": "2021 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
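The "spectral stream" in the ICMEW record above rests on the graph Fourier transform: projecting joint features onto the eigenvectors of the graph Laplacian orders them by graph frequency, exposing structure that purely spatial-temporal convolutions ignore. A minimal sketch under assumed conventions (combinatorial Laplacian, toy five-joint chain):

```python
# Hedged sketch: spectral-domain view of a skeleton graph via the Laplacian.
import numpy as np

def graph_fourier(X, A):
    """X: (N, F) node features; A: (N, N) symmetric adjacency.
    Returns (eigenvalues, spectral coefficients U^T X)."""
    D = np.diag(A.sum(axis=1))
    L = D - A                      # combinatorial graph Laplacian
    w, U = np.linalg.eigh(L)       # eigenvalues ascend: low to high graph frequency
    return w, U.T @ X              # one row of coefficients per frequency, (N, F)

# Toy five-joint chain, e.g. one arm: 0-1-2-3-4.
A = np.zeros((5, 5))
for i, j in [(0, 1), (1, 2), (2, 3), (3, 4)]:
    A[i, j] = A[j, i] = 1.0
X = np.arange(15, dtype=float).reshape(5, 3)
freqs, coeffs = graph_fourier(X, A)
print(freqs.round(2))              # first entry ~0.0: the constant (DC) component
```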
{
"proceeding": {
"id": "1ua4stSUlfa",
"title": "2020 IEEE Intl Conf on Parallel & Distributed Processing with Applications, Big Data & Cloud Computing, Sustainable Computing & Communications, Social Computing & Networking (ISPA/BDCloud/SocialCom/SustainCom)",
"acronym": "ispa-bdcloud-socialcom-sustaincom",
"groupId": "1805944",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1ua4JGNoj9S",
"doi": "10.1109/ISPA-BDCloud-SocialCom-SustainCom51426.2020.00085",
"title": "Multi-Relational Graph Convolutional Networks for Skeleton-Based Action Recognition",
"normalizedTitle": "Multi-Relational Graph Convolutional Networks for Skeleton-Based Action Recognition",
"abstract": "In motion, the interaction relationship between the human body parts is diversified. However, the existing action recognition methods based on the graph convolution neural networks (GCNs) can only deal with a single relation of skeletons. Even some works describe different relations of the skeleton, the adjacency matrices of different relation graphs are added together. This paper proposes a multi-relational GCNs for action recognition following the idea of describing different relations between entities by knowledge graphs. The natural connection relation, symmetric connection relation, and global connection relation of the human body parts are modeled respectively. The features of the relations are transmitted and integrated through the network, which can improve the representation ability of features. Meanwhile, this paper proposes a two-stream multi-relational graph convolution networks (2S-MRGCNs), which processes the joint flow and the body part flow of skeleton data respectively to represent the action more comprehensively. The experimental results show that the 2S-MRGCNs model proposed in this paper has achieved state-of-the-art results on action recognition in kinetics and NTU-RGB+D datasets.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In motion, the interaction relationship between the human body parts is diversified. However, the existing action recognition methods based on the graph convolution neural networks (GCNs) can only deal with a single relation of skeletons. Even some works describe different relations of the skeleton, the adjacency matrices of different relation graphs are added together. This paper proposes a multi-relational GCNs for action recognition following the idea of describing different relations between entities by knowledge graphs. The natural connection relation, symmetric connection relation, and global connection relation of the human body parts are modeled respectively. The features of the relations are transmitted and integrated through the network, which can improve the representation ability of features. Meanwhile, this paper proposes a two-stream multi-relational graph convolution networks (2S-MRGCNs), which processes the joint flow and the body part flow of skeleton data respectively to represent the action more comprehensively. The experimental results show that the 2S-MRGCNs model proposed in this paper has achieved state-of-the-art results on action recognition in kinetics and NTU-RGB+D datasets.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In motion, the interaction relationship between the human body parts is diversified. However, the existing action recognition methods based on the graph convolution neural networks (GCNs) can only deal with a single relation of skeletons. Even some works describe different relations of the skeleton, the adjacency matrices of different relation graphs are added together. This paper proposes a multi-relational GCNs for action recognition following the idea of describing different relations between entities by knowledge graphs. The natural connection relation, symmetric connection relation, and global connection relation of the human body parts are modeled respectively. The features of the relations are transmitted and integrated through the network, which can improve the representation ability of features. Meanwhile, this paper proposes a two-stream multi-relational graph convolution networks (2S-MRGCNs), which processes the joint flow and the body part flow of skeleton data respectively to represent the action more comprehensively. The experimental results show that the 2S-MRGCNs model proposed in this paper has achieved state-of-the-art results on action recognition in kinetics and NTU-RGB+D datasets.",
"fno": "148500a474",
"keywords": [
"Feature Extraction",
"Graph Theory",
"Image Motion Analysis",
"Image Recognition",
"Image Representation",
"Learning Artificial Intelligence",
"Matrix Algebra",
"Neural Nets",
"Object Recognition",
"Multirelational Graph Convolutional Networks",
"Skeleton Based Action Recognition",
"Interaction Relationship",
"Human Body Parts",
"Existing Action Recognition Methods",
"Graph Convolution Neural Networks",
"Different Relations",
"Different Relation Graphs",
"Multirelational GC Ns",
"Knowledge Graphs",
"Natural Connection Relation",
"Symmetric Connection Relation",
"Global Connection Relation",
"Two Stream Multirelational Graph Convolution Networks",
"Body Part Flow",
"Skeleton Data",
"Knowledge Engineering",
"Symmetric Matrices",
"Convolution",
"Biological System Modeling",
"Neural Networks",
"Skeleton",
"Data Models",
"Action Recognition",
"GC Ns",
"Multi Relation",
"Knowledge Graphs",
"Skeleton"
],
"authors": [
{
"affiliation": "School of Computer Science, Shenyang Aerospace University",
"fullName": "Fang Liu",
"givenName": "Fang",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "College of Information, Shenyang Institute of Engineering",
"fullName": "Qin Dai",
"givenName": "Qin",
"surname": "Dai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Innovation and Entrepreneurship, Shenyang Aerospace University,Shenyang,China",
"fullName": "Shengze Wang",
"givenName": "Shengze",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Computer Science, Shenyang Aerospace University",
"fullName": "Liang Zhao",
"givenName": "Liang",
"surname": "Zhao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Computer Science, Shenyang Aerospace University",
"fullName": "Xiangbin Shi",
"givenName": "Xiangbin",
"surname": "Shi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Computer Science and Engineering, Northeastern University",
"fullName": "Jianzhong Qiao",
"givenName": "Jianzhong",
"surname": "Qiao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ispa-bdcloud-socialcom-sustaincom",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-12-01T00:00:00",
"pubType": "proceedings",
"pages": "474-480",
"year": "2020",
"issn": null,
"isbn": "978-1-6654-1485-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "148500a466",
"articleId": "1ua4xkor6XS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "148500a481",
"articleId": "1ua4IcuDrLa",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2022/8563/0/09859781",
"title": "Hierarchical Graph Convolutional Skeleton Transformer for Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859781/1G9DN3HTea4",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859694",
"title": "Structural Attention for Channel-Wise Adaptive Graph Convolution in Skeleton-Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859694/1G9EEMQjNLO",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscer/2022/8478/0/847800a208",
"title": "Inception Spatial Temporal Graph Convolutional Networks for Skeleton-Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/iscer/2022/847800a208/1HbbBGVP8mk",
"parentPublication": {
"id": "proceedings/iscer/2022/8478/0",
"title": "2022 International Symposium on Control Engineering and Robotics (ISCER)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956327",
"title": "Skeleton-based Action Recognition with Graph Involution Network",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956327/1IHozeox3mE",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956108",
"title": "A Graph Convolutional Network with Early Attention Module for Skeleton-based Action Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956108/1IHpyw9hnQ4",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300m2018",
"title": "Two-Stream Adaptive Graph Convolutional Networks for Skeleton-Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300m2018/1gyrm53UBj2",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800a180",
"title": "Skeleton-Based Action Recognition With Shift Graph Convolutional Network",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800a180/1m3nFwgro2Y",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2021/4989/0/09455987",
"title": "Spatiotemporal-Spectral Graph Convolutional Networks For Skeleton-Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2021/09455987/1uCgvxv1gly",
"parentPublication": {
"id": "proceedings/icmew/2021/4989/0",
"title": "2021 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2021/3864/0/09428355",
"title": "Graph Convolutional Hourglass Networks for Skeleton-Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2021/09428355/1uimg7WQXVC",
"parentPublication": {
"id": "proceedings/icme/2021/3864/0",
"title": "2021 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asap/2021/2701/0/270100a033",
"title": "RFC-HyPGCN: A Runtime Sparse Feature Compress Accelerator for Skeleton-Based GCNs Action Recognition Model with Hybrid Pruning",
"doi": null,
"abstractUrl": "/proceedings-article/asap/2021/270100a033/1wiR1VntNxm",
"parentPublication": {
"id": "proceedings/asap/2021/2701/0",
"title": "2021 IEEE 32nd International Conference on Application-specific Systems, Architectures and Processors (ASAP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
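The multi-relational idea in the record above amounts to keeping one adjacency matrix and one weight matrix per relation (natural bones, left/right symmetry, global links) instead of summing the adjacencies up front. A minimal sketch in that spirit; the relation graphs below are stand-ins, not the paper's definitions.

```python
# Hedged sketch: one (adjacency, weight) pair per relation, messages summed per joint.
import numpy as np

def multi_relational_gcn(X, adjs, weights):
    """X: (N, F); adjs: list of (N, N) row-normalized adjacencies;
    weights: list of (F, Fp) matrices, one per relation."""
    return np.maximum(sum(A @ X @ W for A, W in zip(adjs, weights)), 0.0)

def row_normalize(A):
    d = A.sum(axis=1, keepdims=True)
    return np.divide(A, d, out=np.zeros_like(A), where=d > 0)

N, F, Fp = 25, 3, 8
natural = np.eye(N)
for i in range(N - 1):                        # stand-in for the real bone list
    natural[i, i + 1] = natural[i + 1, i] = 1.0
symmetric = np.eye(N)
for i in range(N // 2):                       # stand-in left/right mirror pairs
    symmetric[i, N - 1 - i] = symmetric[N - 1 - i, i] = 1.0
global_rel = np.ones((N, N)) / N              # every joint connected to every joint
adjs = [row_normalize(natural), row_normalize(symmetric), global_rel]
rng = np.random.default_rng(1)
weights = [rng.normal(size=(F, Fp)) for _ in adjs]
print(multi_relational_gcn(rng.normal(size=(N, F)), adjs, weights).shape)  # (25, 8)
```

Keeping the per-relation weights separate is what lets the network weigh, say, mirror-symmetry cues differently from bone-adjacency cues rather than blending them in a single adjacency sum.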
{
"proceeding": {
"id": "12OmNyoiYVr",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBO3Kba",
"doi": "10.1109/CVPR.2017.544",
"title": "Scene Parsing through ADE20K Dataset",
"normalizedTitle": "Scene Parsing through ADE20K Dataset",
"abstract": "Scene parsing, or recognizing and segmenting objects and stuff in an image, is one of the key problems in computer vision. Despite the communitys efforts in data collection, there are still few image datasets covering a wide range of scenes and object categories with dense and detailed annotations for scene parsing. In this paper, we introduce and analyze the ADE20K dataset, spanning diverse annotations of scenes, objects, parts of objects, and in some cases even parts of parts. A scene parsing benchmark is built upon the ADE20K with 150 object and stuff classes included. Several segmentation baseline models are evaluated on the benchmark. A novel network design called Cascade Segmentation Module is proposed to parse a scene into stuff, objects, and object parts in a cascade and improve over the baselines. We further show that the trained scene parsing networks can lead to applications such as image content removal and scene synthesis1.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Scene parsing, or recognizing and segmenting objects and stuff in an image, is one of the key problems in computer vision. Despite the communitys efforts in data collection, there are still few image datasets covering a wide range of scenes and object categories with dense and detailed annotations for scene parsing. In this paper, we introduce and analyze the ADE20K dataset, spanning diverse annotations of scenes, objects, parts of objects, and in some cases even parts of parts. A scene parsing benchmark is built upon the ADE20K with 150 object and stuff classes included. Several segmentation baseline models are evaluated on the benchmark. A novel network design called Cascade Segmentation Module is proposed to parse a scene into stuff, objects, and object parts in a cascade and improve over the baselines. We further show that the trained scene parsing networks can lead to applications such as image content removal and scene synthesis1.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Scene parsing, or recognizing and segmenting objects and stuff in an image, is one of the key problems in computer vision. Despite the communitys efforts in data collection, there are still few image datasets covering a wide range of scenes and object categories with dense and detailed annotations for scene parsing. In this paper, we introduce and analyze the ADE20K dataset, spanning diverse annotations of scenes, objects, parts of objects, and in some cases even parts of parts. A scene parsing benchmark is built upon the ADE20K with 150 object and stuff classes included. Several segmentation baseline models are evaluated on the benchmark. A novel network design called Cascade Segmentation Module is proposed to parse a scene into stuff, objects, and object parts in a cascade and improve over the baselines. We further show that the trained scene parsing networks can lead to applications such as image content removal and scene synthesis1.",
"fno": "0457f122",
"keywords": [
"Computer Vision",
"Image Representation",
"Image Segmentation",
"Learning Artificial Intelligence",
"Object Recognition",
"ADE 20 K Dataset",
"Image Datasets",
"Dense Annotations",
"Detailed Annotations",
"Scene Parsing Benchmark",
"Object Parts",
"Trained Scene Parsing Networks",
"Object Segmentation",
"Object Recognition",
"Image Segmentation",
"Semantics",
"Sun",
"Labeling",
"Visualization",
"Neural Networks",
"Computer Vision"
],
"authors": [
{
"affiliation": null,
"fullName": "Bolei Zhou",
"givenName": "Bolei",
"surname": "Zhou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hang Zhao",
"givenName": "Hang",
"surname": "Zhao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xavier Puig",
"givenName": "Xavier",
"surname": "Puig",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Sanja Fidler",
"givenName": "Sanja",
"surname": "Fidler",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Adela Barriuso",
"givenName": "Adela",
"surname": "Barriuso",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Antonio Torralba",
"givenName": "Antonio",
"surname": "Torralba",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-07-01T00:00:00",
"pubType": "proceedings",
"pages": "5122-5130",
"year": "2017",
"issn": "1063-6919",
"isbn": "978-1-5386-0457-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "0457f112",
"articleId": "12OmNxRnvNu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "0457f131",
"articleId": "12OmNynJMQO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2017/0457/0/0457g269",
"title": "Indoor Scene Parsing with Instance Segmentation, Semantic Labeling and Support Relationship Inference",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457g269/12OmNAq3hLV",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2017/6067/0/08019367",
"title": "HDPA: Hierarchical deep probability analysis for scene parsing",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2017/08019367/12OmNBInLln",
"parentPublication": {
"id": "proceedings/icme/2017/6067/0",
"title": "2017 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032c650",
"title": "Scene Parsing with Global Context Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032c650/12OmNC8dgk5",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457g230",
"title": "Pyramid Scene Parsing Network",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457g230/12OmNvrMUeP",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032c050",
"title": "Scale-Adaptive Convolutions for Scene Parsing",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032c050/12OmNylKAYo",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cse-euc/2017/3220/1/08005839",
"title": "Research on Scene Parsing Algorithm Cascading Object Detection Network",
"doi": null,
"abstractUrl": "/proceedings-article/cse-euc/2017/08005839/17D45WKWnI8",
"parentPublication": {
"id": "proceedings/cse-euc/2017/3220/1",
"title": "2017 IEEE International Conference on Computational Science and Engineering (CSE) and IEEE International Conference on Embedded and Ubiquitous Computing (EUC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200p5828",
"title": "Interaction via Bi-directional Graph of Semantic Region Affinity for Scene Parsing",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200p5828/1BmKfmmaZJm",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956641",
"title": "Improving Weakly Supervised Scene Graph Parsing through Object Grounding",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956641/1IHoHmmLiEw",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300g747",
"title": "Adaptive Context Network for Scene Parsing",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300g747/1hQqwNOmVjO",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/4.509E136",
"title": "VSPW: A Large-scale Dataset for Video Scene Parsing in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/4.509E136/1yeIjkShsLm",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
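Scene-parsing benchmarks like the 150-class ADE20K setup in the record above are conventionally scored with per-class intersection-over-union averaged into a mean IoU. A small self-contained sketch of that metric (label layout and class count here are illustrative):

```python
# Hedged sketch: per-class IoU and mean IoU for semantic segmentation.
import numpy as np

def mean_iou(pred, gt, num_classes):
    """pred, gt: integer label maps of equal shape.
    Classes absent from both prediction and ground truth are skipped."""
    ious = []
    for c in range(num_classes):
        p, g = pred == c, gt == c
        union = np.logical_or(p, g).sum()
        if union == 0:
            continue                          # class not present in this image
        ious.append(np.logical_and(p, g).sum() / union)
    ious = np.array(ious)
    return ious, ious.mean()

rng = np.random.default_rng(2)
gt = rng.integers(0, 5, size=(4, 4))          # toy 4x4 label map using 5 classes
pred = gt.copy()
pred[0, 0] = (gt[0, 0] + 1) % 5               # introduce one wrong pixel
per_class, miou = mean_iou(pred, gt, num_classes=150)
print(round(miou, 3))
```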
{
"proceeding": {
"id": "12OmNyugyQd",
"title": "Image and Video Technology, Pacific-Rim Symposium on",
"acronym": "psivt",
"groupId": "1800241",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBQTJg1",
"doi": "10.1109/PSIVT.2010.14",
"title": "Semantic Segmentation and Object Recognition Using Scene-Context Scale",
"normalizedTitle": "Semantic Segmentation and Object Recognition Using Scene-Context Scale",
"abstract": "Scene-context plays an important role in scene analysis and object recognition. Among various sources of scene-context, we focus on scene-context scale, which means the effective region size of local context to classify an image pixel in a scene. This paper presents semantic segmentation and object recognition using scene-context scale. The scene-context scale can be estimated by the entropy of the leaf node in multi-scale text on forests. The multi-scale text on forests efficiently provide both hierarchical clustering into semantic textons and local classification depending on different scale levels. For semantic segmentation, we combine the classified category distributions of scene-context scale with the bag-of-textons model. In our experiments, we use MSRC21 segmentation dataset to assess our segmentation algorithm and show that the usage of the scene-context scale improves recognition performance.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Scene-context plays an important role in scene analysis and object recognition. Among various sources of scene-context, we focus on scene-context scale, which means the effective region size of local context to classify an image pixel in a scene. This paper presents semantic segmentation and object recognition using scene-context scale. The scene-context scale can be estimated by the entropy of the leaf node in multi-scale text on forests. The multi-scale text on forests efficiently provide both hierarchical clustering into semantic textons and local classification depending on different scale levels. For semantic segmentation, we combine the classified category distributions of scene-context scale with the bag-of-textons model. In our experiments, we use MSRC21 segmentation dataset to assess our segmentation algorithm and show that the usage of the scene-context scale improves recognition performance.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Scene-context plays an important role in scene analysis and object recognition. Among various sources of scene-context, we focus on scene-context scale, which means the effective region size of local context to classify an image pixel in a scene. This paper presents semantic segmentation and object recognition using scene-context scale. The scene-context scale can be estimated by the entropy of the leaf node in multi-scale text on forests. The multi-scale text on forests efficiently provide both hierarchical clustering into semantic textons and local classification depending on different scale levels. For semantic segmentation, we combine the classified category distributions of scene-context scale with the bag-of-textons model. In our experiments, we use MSRC21 segmentation dataset to assess our segmentation algorithm and show that the usage of the scene-context scale improves recognition performance.",
"fno": "4285a039",
"keywords": [
"Scene Context Scale",
"Semantic Segmentation",
"Object Recognition"
],
"authors": [
{
"affiliation": null,
"fullName": "Yousun Kang",
"givenName": "Yousun",
"surname": "Kang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hiroshi Nagahashi",
"givenName": "Hiroshi",
"surname": "Nagahashi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Akihiro Sugimoto",
"givenName": "Akihiro",
"surname": "Sugimoto",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "psivt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-11-01T00:00:00",
"pubType": "proceedings",
"pages": "39-45",
"year": "2010",
"issn": null,
"isbn": "978-0-7695-4285-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4285a033",
"articleId": "12OmNBzAci8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4285a046",
"articleId": "12OmNwF0BYK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dimpvt/2011/4369/0/4369a057",
"title": "Scene Segmentation Assisted by Stereo Vision",
"doi": null,
"abstractUrl": "/proceedings-article/3dimpvt/2011/4369a057/12OmNASraUu",
"parentPublication": {
"id": "proceedings/3dimpvt/2011/4369/0",
"title": "2011 International Conference on 3D Imaging, Modeling, Processing, Visualization and Transmission",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2005/9331/0/01521588",
"title": "Improved semantic region labeling based on scene context",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2005/01521588/12OmNwFzO1I",
"parentPublication": {
"id": "proceedings/icme/2005/9331/0",
"title": "2005 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2011/4589/0/4589a257",
"title": "Scale-Optimized Textons for Image Categorization and Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2011/4589a257/12OmNx8figY",
"parentPublication": {
"id": "proceedings/ism/2011/4589/0",
"title": "2011 IEEE International Symposium on Multimedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mue/2008/3134/0/3134a407",
"title": "SVM-Based Video Scene Classification and Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/mue/2008/3134a407/12OmNyjLoOW",
"parentPublication": {
"id": "proceedings/mue/2008/3134/0",
"title": "Multimedia and Ubiquitous Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2018/06/07940028",
"title": "Scene Segmentation with DAG-Recurrent Neural Networks",
"doi": null,
"abstractUrl": "/journal/tp/2018/06/07940028/13rRUygT7ap",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000c393",
"title": "Context Contrasted Feature and Gated Multi-scale Aggregation for Scene Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000c393/17D45WrVg21",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300h511",
"title": "Adaptive Pyramid Context Network for Semantic Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300h511/1gyrJPSuYlG",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300d561",
"title": "Dynamic Multi-Scale Filters for Semantic Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300d561/1hVlQHtE1OM",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800m2413",
"title": "Context Prior for Scene Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800m2413/1m3nof0iuyY",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iisa/2021/0032/0/09555526",
"title": "Semantic Scene Segmentation for Robotics Applications",
"doi": null,
"abstractUrl": "/proceedings-article/iisa/2021/09555526/1xxcn8C5rvq",
"parentPublication": {
"id": "proceedings/iisa/2021/0032/0",
"title": "2021 12th International Conference on Information, Intelligence, Systems & Applications (IISA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
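The PSIVT record above estimates the scene-context scale from leaf-node entropy: a low-entropy class posterior suggests the context window at that scale is already discriminative. A toy sketch of that selection rule, with made-up posteriors for one pixel:

```python
# Hedged sketch: pick the context scale whose class posterior has lowest entropy.
import numpy as np

def entropy(p):
    p = p[p > 0]                        # ignore zero-probability classes
    return float(-(p * np.log2(p)).sum())

# Hypothetical class posteriors at three context-window sizes for one pixel.
posteriors = {
    16: np.array([0.40, 0.30, 0.30]),   # small window: ambiguous
    32: np.array([0.70, 0.20, 0.10]),
    64: np.array([0.95, 0.04, 0.01]),   # large window: confident
}
best_scale = min(posteriors, key=lambda s: entropy(posteriors[s]))
print(best_scale)                       # 64: the lowest-entropy posterior
```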
{
"proceeding": {
"id": "12OmNyoiYVr",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwHz0aI",
"doi": "10.1109/CVPR.2017.537",
"title": "Physically-Based Rendering for Indoor Scene Understanding Using Convolutional Neural Networks",
"normalizedTitle": "Physically-Based Rendering for Indoor Scene Understanding Using Convolutional Neural Networks",
"abstract": "Indoor scene understanding is central to applications such as robot navigation and human companion assistance. Over the last years, data-driven deep neural networks have outperformed many traditional approaches thanks to their representation learning capabilities. One of the bottlenecks in training for better representations is the amount of available per-pixel ground truth data that is required for core scene understanding tasks such as semantic segmentation, normal prediction, and object boundary detection. To address this problem, a number of works proposed using synthetic data. However, a systematic study of how such synthetic data is generated is missing. In this work, we introduce a large-scale synthetic dataset with 500K physically-based rendered images from 45K realistic 3D indoor scenes. We study the effects of rendering methods and scene lighting on training for three computer vision tasks: surface normal prediction, semantic segmentation, and object boundary detection. This study provides insights into the best practices for training with synthetic data (more realistic rendering is worth it) and shows that pretraining with our new synthetic dataset can improve results beyond the current state of the art on all three tasks.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Indoor scene understanding is central to applications such as robot navigation and human companion assistance. Over the last years, data-driven deep neural networks have outperformed many traditional approaches thanks to their representation learning capabilities. One of the bottlenecks in training for better representations is the amount of available per-pixel ground truth data that is required for core scene understanding tasks such as semantic segmentation, normal prediction, and object boundary detection. To address this problem, a number of works proposed using synthetic data. However, a systematic study of how such synthetic data is generated is missing. In this work, we introduce a large-scale synthetic dataset with 500K physically-based rendered images from 45K realistic 3D indoor scenes. We study the effects of rendering methods and scene lighting on training for three computer vision tasks: surface normal prediction, semantic segmentation, and object boundary detection. This study provides insights into the best practices for training with synthetic data (more realistic rendering is worth it) and shows that pretraining with our new synthetic dataset can improve results beyond the current state of the art on all three tasks.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Indoor scene understanding is central to applications such as robot navigation and human companion assistance. Over the last years, data-driven deep neural networks have outperformed many traditional approaches thanks to their representation learning capabilities. One of the bottlenecks in training for better representations is the amount of available per-pixel ground truth data that is required for core scene understanding tasks such as semantic segmentation, normal prediction, and object boundary detection. To address this problem, a number of works proposed using synthetic data. However, a systematic study of how such synthetic data is generated is missing. In this work, we introduce a large-scale synthetic dataset with 500K physically-based rendered images from 45K realistic 3D indoor scenes. We study the effects of rendering methods and scene lighting on training for three computer vision tasks: surface normal prediction, semantic segmentation, and object boundary detection. This study provides insights into the best practices for training with synthetic data (more realistic rendering is worth it) and shows that pretraining with our new synthetic dataset can improve results beyond the current state of the art on all three tasks.",
"fno": "0457f057",
"keywords": [
"Computer Vision",
"Image Segmentation",
"Learning Artificial Intelligence",
"Neural Nets",
"Object Detection",
"Rendering Computer Graphics",
"Solid Modelling",
"Indoor Scene Understanding",
"Convolutional Neural Networks",
"Representation Learning Capabilities",
"Per Pixel Ground Truth Data",
"Core Scene Understanding Tasks",
"Semantic Segmentation",
"Object Boundary Detection",
"Large Scale Synthetic Dataset",
"Surface Normal Prediction",
"Physically Based Rendering",
"Data Driven Deep Neural Networks",
"Image Rendering",
"3 D Indoor Scenes",
"Rendering Computer Graphics",
"Lighting",
"Three Dimensional Displays",
"Cameras",
"Semantics",
"Training"
],
"authors": [
{
"affiliation": null,
"fullName": "Yinda Zhang",
"givenName": "Yinda",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Shuran Song",
"givenName": "Shuran",
"surname": "Song",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ersin Yumer",
"givenName": "Ersin",
"surname": "Yumer",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Manolis Savva",
"givenName": "Manolis",
"surname": "Savva",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Joon-Young Lee",
"givenName": "Joon-Young",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hailin Jin",
"givenName": "Hailin",
"surname": "Jin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Thomas Funkhouser",
"givenName": "Thomas",
"surname": "Funkhouser",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-07-01T00:00:00",
"pubType": "proceedings",
"pages": "5057-5065",
"year": "2017",
"issn": "1063-6919",
"isbn": "978-1-5386-0457-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "0457f048",
"articleId": "12OmNyp9MkB",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "0457f066",
"articleId": "12OmNrnJ6Nw",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/sibgrapi/2018/9264/0/926400a226",
"title": "Scene Conversion for Physically-Based Renderers",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2018/926400a226/17D45WaTkhE",
"parentPublication": {
"id": "proceedings/sibgrapi/2018/9264/0",
"title": "2018 31st SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200n3759",
"title": "Learning Object-Compositional Neural Radiance Field for Editable Scene Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200n3759/1BmFVQ9MBFK",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200m2518",
"title": "Learning Indoor Inverse Rendering with 3D Spatially-Varying Lighting",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200m2518/1BmI8MZrhYY",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/10043749",
"title": "MILO: Multi-bounce Inverse Rendering for Indoor Scene with Light-emitting Objects",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/10043749/1KJs5SH0na8",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10050341",
"title": "GeoSynth: A Photorealistic Synthetic Indoor Dataset for Scene Understanding",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10050341/1KYorVtFExW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/06/08936883",
"title": "Planar Abstraction and Inverse Rendering of 3D Indoor Environments",
"doi": null,
"abstractUrl": "/journal/tg/2021/06/08936883/1fTdX59qZUs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300i597",
"title": "Neural Inverse Rendering of an Indoor Scene From a Single Image",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300i597/1hVlOrVOpck",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300c730",
"title": "Deep CG2Real: Synthetic-to-Real Translation via Image Disentanglement",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300c730/1hVltQyTSXS",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800c472",
"title": "Inverse Rendering for Complex Indoor Scenes: Shape, Spatially-Varying Lighting and SVBRDF From a Single Image",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800c472/1m3o03C864M",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2021/1952/0/09466274",
"title": "View-dependent Scene Appearance Synthesis using Inverse Rendering from Light Fields",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2021/09466274/1uSSV7tRhSw",
"parentPublication": {
"id": "proceedings/iccp/2021/1952/0",
"title": "2021 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
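The record above (Zhang et al., CVPR 2017) finds that pretraining on physically-based renderings and then fine-tuning on real data improves per-pixel tasks. A minimal PyTorch sketch of that pretrain-then-finetune recipe follows; the toy fully-convolutional net and the random tensors standing in for the rendered and real corpora are illustrative assumptions, not the authors' setup.

```python
# Sketch of the synthetic-pretrain / real-finetune recipe described above.
# Not the authors' code: the model is a toy FCN and the "datasets" are
# random tensors standing in for rendered and real images with labels.
import torch
import torch.nn as nn

NUM_CLASSES = 13  # assumption: an NYUv2-style 13-class labelling

def make_fcn() -> nn.Module:
    # Tiny fully-convolutional net producing per-pixel class logits.
    return nn.Sequential(
        nn.Conv2d(3, 32, 3, padding=1), nn.ReLU(),
        nn.Conv2d(32, 32, 3, padding=1), nn.ReLU(),
        nn.Conv2d(32, NUM_CLASSES, 1),
    )

def train(model, images, labels, steps, lr):
    opt = torch.optim.Adam(model.parameters(), lr=lr)
    loss_fn = nn.CrossEntropyLoss()
    for _ in range(steps):
        opt.zero_grad()
        loss = loss_fn(model(images), labels)
        loss.backward()
        opt.step()

# Stand-ins for the two corpora (the synthetic one is larger, as in the paper).
synth_x = torch.randn(16, 3, 64, 64)
synth_y = torch.randint(0, NUM_CLASSES, (16, 64, 64))
real_x = torch.randn(4, 3, 64, 64)
real_y = torch.randint(0, NUM_CLASSES, (4, 64, 64))

model = make_fcn()
train(model, synth_x, synth_y, steps=50, lr=1e-3)  # pretrain on synthetic
train(model, real_x, real_y, steps=20, lr=1e-4)    # fine-tune on real, lower LR
```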
{
"proceeding": {
"id": "12OmNAXxXaK",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwdL7kX",
"doi": "10.1109/ICCV.2017.292",
"title": "SceneNet RGB-D: Can 5M Synthetic Images Beat Generic ImageNet Pre-training on Indoor Segmentation?",
"normalizedTitle": "SceneNet RGB-D: Can 5M Synthetic Images Beat Generic ImageNet Pre-training on Indoor Segmentation?",
"abstract": "We introduce SceneNet RGB-D, a dataset providing pixel-perfect ground truth for scene understanding problems such as semantic segmentation, instance segmentation, and object detection. It also provides perfect camera poses and depth data, allowing investigation into geometric computer vision problems such as optical flow, camera pose estimation, and 3D scene labelling tasks. Random sampling permits virtually unlimited scene configurations, and here we provide 5M rendered RGB-D images from 16K randomly generated 3D trajectories in synthetic layouts, with random but physically simulated object configurations. We compare the semantic segmentation performance of network weights produced from pretraining on RGB images from our dataset against generic VGG-16 ImageNet weights. After fine-tuning on the SUN RGB-D and NYUv2 real-world datasets we find in both cases that the synthetically pre-trained network outperforms the VGG-16 weights. When synthetic pre-training includes a depth channel (something ImageNet cannot natively provide) the performance is greater still. This suggests that large-scale high-quality synthetic RGB datasets with task-specific labels can be more useful for pretraining than real-world generic pre-training such as ImageNet. We host the dataset at http://robotvault. bitbucket.io/scenenet-rgbd.html.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We introduce SceneNet RGB-D, a dataset providing pixel-perfect ground truth for scene understanding problems such as semantic segmentation, instance segmentation, and object detection. It also provides perfect camera poses and depth data, allowing investigation into geometric computer vision problems such as optical flow, camera pose estimation, and 3D scene labelling tasks. Random sampling permits virtually unlimited scene configurations, and here we provide 5M rendered RGB-D images from 16K randomly generated 3D trajectories in synthetic layouts, with random but physically simulated object configurations. We compare the semantic segmentation performance of network weights produced from pretraining on RGB images from our dataset against generic VGG-16 ImageNet weights. After fine-tuning on the SUN RGB-D and NYUv2 real-world datasets we find in both cases that the synthetically pre-trained network outperforms the VGG-16 weights. When synthetic pre-training includes a depth channel (something ImageNet cannot natively provide) the performance is greater still. This suggests that large-scale high-quality synthetic RGB datasets with task-specific labels can be more useful for pretraining than real-world generic pre-training such as ImageNet. We host the dataset at http://robotvault. bitbucket.io/scenenet-rgbd.html.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We introduce SceneNet RGB-D, a dataset providing pixel-perfect ground truth for scene understanding problems such as semantic segmentation, instance segmentation, and object detection. It also provides perfect camera poses and depth data, allowing investigation into geometric computer vision problems such as optical flow, camera pose estimation, and 3D scene labelling tasks. Random sampling permits virtually unlimited scene configurations, and here we provide 5M rendered RGB-D images from 16K randomly generated 3D trajectories in synthetic layouts, with random but physically simulated object configurations. We compare the semantic segmentation performance of network weights produced from pretraining on RGB images from our dataset against generic VGG-16 ImageNet weights. After fine-tuning on the SUN RGB-D and NYUv2 real-world datasets we find in both cases that the synthetically pre-trained network outperforms the VGG-16 weights. When synthetic pre-training includes a depth channel (something ImageNet cannot natively provide) the performance is greater still. This suggests that large-scale high-quality synthetic RGB datasets with task-specific labels can be more useful for pretraining than real-world generic pre-training such as ImageNet. We host the dataset at http://robotvault. bitbucket.io/scenenet-rgbd.html.",
"fno": "1032c697",
"keywords": [
"Cameras",
"Computer Vision",
"Image Colour Analysis",
"Image Segmentation",
"Image Sensors",
"Image Sequences",
"Object Detection",
"Pose Estimation",
"Scene Net RGB D",
"5 M Synthetic Images",
"Generic Imagenet Pre Training",
"Indoor Segmentation",
"Pixel Perfect Ground Truth",
"Object Detection",
"Geometric Computer Vision Problems",
"Optical Flow",
"3 D Scene Labelling Tasks",
"Random Sampling",
"Unlimited Scene Configurations",
"RGB D Images",
"Synthetic Layouts",
"Semantic Segmentation Performance",
"RGB Images",
"Generic VGG 16 Image Net Weights",
"SUN RGB D",
"NY Uv 2 Real World Datasets",
"Synthetically Pre Trained Network",
"VGG 16 Weights",
"Depth Channel",
"High Quality Synthetic RGB Datasets",
"Task Specific Labels",
"Image Net",
"Synthetic Pretraining",
"Real World Generic Pretraining",
"Temperature 16 0 K",
"SUN",
"Three Dimensional Displays",
"Trajectory",
"Semantics",
"Layout",
"Rendering Computer Graphics",
"Videos",
"Image Segmentation"
],
"authors": [
{
"affiliation": null,
"fullName": "John McCormac",
"givenName": "John",
"surname": "McCormac",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ankur Handa",
"givenName": "Ankur",
"surname": "Handa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Stefan Leutenegger",
"givenName": "Stefan",
"surname": "Leutenegger",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Andrew J. Davison",
"givenName": "Andrew J.",
"surname": "Davison",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "2697-2706",
"year": "2017",
"issn": "2380-7504",
"isbn": "978-1-5386-1032-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "1032c688",
"articleId": "12OmNro0HRE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "1032c707",
"articleId": "12OmNCmGNPt",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvprw/2015/6759/0/07301302",
"title": "Robust object recognition in RGB-D egocentric videos based on Sparse Affine Hull Kernel",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2015/07301302/12OmNBrV1Qf",
"parentPublication": {
"id": "proceedings/cvprw/2015/6759/0",
"title": "2015 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/06977424",
"title": "Depth Structure Association for RGB-D Multi-target Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/06977424/12OmNySG3Ts",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457a416",
"title": "Scene Flow to Action Map: A New Representation for RGB-D Based Action Recognition with Convolutional Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457a416/12OmNz3bdIy",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391a774",
"title": "Semantic Pose Using Deep Networks Trained on Synthetic RGB-D",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391a774/12OmNz61d4S",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2016/04/07115131",
"title": "Intrinsic Scene Properties from a Single RGB-D Image",
"doi": null,
"abstractUrl": "/journal/tp/2016/04/07115131/13rRUNvyamf",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacvw/2022/5824/0/582400a500",
"title": "Learning from Synthetic Vehicles",
"doi": null,
"abstractUrl": "/proceedings-article/wacvw/2022/582400a500/1B12vrAF2M0",
"parentPublication": {
"id": "proceedings/wacvw/2022/5824/0",
"title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision Workshops (WACVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200h068",
"title": "ShapeConv: Shape-aware Convolutional Layer for Indoor RGB-D Semantic Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200h068/1BmK6eSnTUI",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/03/09782511",
"title": "MMNet: A Model-Based Multimodal Network for Human Action Recognition in RGB-D Videos",
"doi": null,
"abstractUrl": "/journal/tp/2023/03/09782511/1DGRY8gAcPm",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2005/2372/2/01467605",
"title": "RGB-Z: mapping a sparse depth map to a high resolution RGB camera image",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2005/01467605/1htC5iEJs1W",
"parentPublication": {
"id": "proceedings/cvpr/2005/2372/2",
"title": "2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2019/4328/0/09047367",
"title": "Parsing Indoor Scenes from RGB-D Image Using Superpixel and Region Merging",
"doi": null,
"abstractUrl": "/proceedings-article/ispa-bdcloud-socialcom-sustaincom/2019/09047367/1iC6D9QWozm",
"parentPublication": {
"id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2019/4328/0",
"title": "2019 IEEE Intl Conf on Parallel & Distributed Processing with Applications, Big Data & Cloud Computing, Sustainable Computing & Communications, Social Computing & Networking (ISPA/BDCloud/SocialCom/SustainCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
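The SceneNet RGB-D record above reports a further gain when synthetic pre-training includes a depth channel. One common way to feed RGB-D into an RGB-only backbone such as VGG-16 is to widen the first convolution from 3 to 4 input channels; the weight-copying scheme below (reusing the RGB kernels and seeding the depth channel with their mean) is a standard trick offered as an assumption, not necessarily the authors' exact procedure.

```python
# Sketch: adapting VGG-16 to an extra depth channel by inflating the first conv.
import torch
import torch.nn as nn
from torchvision.models import vgg16

model = vgg16(weights=None)  # weights=None keeps this sketch offline
old = model.features[0]      # Conv2d(3, 64, kernel_size=3, padding=1)
new = nn.Conv2d(4, old.out_channels, kernel_size=old.kernel_size,
                stride=old.stride, padding=old.padding)
with torch.no_grad():
    new.weight[:, :3] = old.weight                            # reuse RGB filters
    new.weight[:, 3:] = old.weight.mean(dim=1, keepdim=True)  # init depth channel
    new.bias.copy_(old.bias)
model.features[0] = new

rgbd = torch.randn(1, 4, 224, 224)  # stand-in RGB-D input
print(model(rgbd).shape)            # torch.Size([1, 1000])
```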
{
"proceeding": {
"id": "1BmEezmpGrm",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1BmHg3A2R7G",
"doi": "10.1109/ICCV48922.2021.01558",
"title": "Segmentation-grounded Scene Graph Generation",
"normalizedTitle": "Segmentation-grounded Scene Graph Generation",
"abstract": "Scene graph generation has emerged as an important problem in computer vision. While scene graphs provide a grounded representation of objects, their locations and relations in an image, they do so only at the granularity of proposal bounding boxes. In this work, we propose the first, to our knowledge, framework for pixel-level segmentation-grounded scene graph generation. Our framework is agnostic to the underlying scene graph generation method and address the lack of segmentation annotations in target scene graph datasets (e.g., Visual Genome [24]) through transfer and multi-task learning from, and with, an auxiliary dataset (e.g., MS COCO [29]). Specifically, each target object being detected is endowed with a segmentation mask, which is expressed as a lingual-similarity weighted linear combination over categories that have annotations present in an auxiliary dataset. These inferred masks, along with a Gaussian masking mechanism which grounds the relations at a pixel-level within the image, allow for improved relation prediction. The entire framework is end-to-end trainable and is learned in a multi-task manner. Code is available at github.com/ubc-vision/segmentation-sg.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Scene graph generation has emerged as an important problem in computer vision. While scene graphs provide a grounded representation of objects, their locations and relations in an image, they do so only at the granularity of proposal bounding boxes. In this work, we propose the first, to our knowledge, framework for pixel-level segmentation-grounded scene graph generation. Our framework is agnostic to the underlying scene graph generation method and address the lack of segmentation annotations in target scene graph datasets (e.g., Visual Genome [24]) through transfer and multi-task learning from, and with, an auxiliary dataset (e.g., MS COCO [29]). Specifically, each target object being detected is endowed with a segmentation mask, which is expressed as a lingual-similarity weighted linear combination over categories that have annotations present in an auxiliary dataset. These inferred masks, along with a Gaussian masking mechanism which grounds the relations at a pixel-level within the image, allow for improved relation prediction. The entire framework is end-to-end trainable and is learned in a multi-task manner. Code is available at github.com/ubc-vision/segmentation-sg.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Scene graph generation has emerged as an important problem in computer vision. While scene graphs provide a grounded representation of objects, their locations and relations in an image, they do so only at the granularity of proposal bounding boxes. In this work, we propose the first, to our knowledge, framework for pixel-level segmentation-grounded scene graph generation. Our framework is agnostic to the underlying scene graph generation method and address the lack of segmentation annotations in target scene graph datasets (e.g., Visual Genome [24]) through transfer and multi-task learning from, and with, an auxiliary dataset (e.g., MS COCO [29]). Specifically, each target object being detected is endowed with a segmentation mask, which is expressed as a lingual-similarity weighted linear combination over categories that have annotations present in an auxiliary dataset. These inferred masks, along with a Gaussian masking mechanism which grounds the relations at a pixel-level within the image, allow for improved relation prediction. The entire framework is end-to-end trainable and is learned in a multi-task manner. Code is available at github.com/ubc-vision/segmentation-sg.",
"fno": "281200p5859",
"keywords": [
"Image Segmentation",
"Computer Vision",
"Visualization",
"Grounding",
"Annotations",
"Genomics",
"Computer Architecture",
"Scene Analysis And Understanding",
"Segmentation",
"Grouping And Shape",
"Transfer Low Shot Semi Unsupervised Learning"
],
"authors": [
{
"affiliation": "University of British Columbia,Department of Computer Science",
"fullName": "Siddhesh Khandelwal",
"givenName": "Siddhesh",
"surname": "Khandelwal",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of British Columbia,Department of Computer Science",
"fullName": "Mohammed Suhail",
"givenName": "Mohammed",
"surname": "Suhail",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of British Columbia,Department of Computer Science",
"fullName": "Leonid Sigal",
"givenName": "Leonid",
"surname": "Sigal",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "15859-15869",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-2812-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "281200p5849",
"articleId": "1BmLdo6m8Tu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "281200p5870",
"articleId": "1BmLhPbVYs0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2017/0457/0/0457d097",
"title": "Scene Graph Generation by Iterative Message Passing",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457d097/12OmNBAqZH0",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200q6352",
"title": "Spatial-Temporal Transformer for Dynamic Scene Graph Generation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200q6352/1BmEvNHaTO8",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200q6342",
"title": "Unconditional Scene Graph Generation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200q6342/1BmFToTztcc",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200q6373",
"title": "A Simple Baseline for Weakly-Supervised Scene Graph Generation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200q6373/1BmKSXj9184",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859841",
"title": "High-Quality Image Generation from Scene Graphs with Transformer",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859841/1G9DSypmuNa",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956641",
"title": "Improving Weakly Supervised Scene Graph Parsing through Object Grounding",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956641/1IHoHmmLiEw",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956599",
"title": "Transformer-based Scene Graph Generation Network With Relational Attention Module",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956599/1IHpdpALly8",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412662",
"title": "FashionGraph: Understanding fashion data using scene graph generation",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412662/1tmjp7Qu9ji",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900l1104",
"title": "Bipartite Graph Network with Adaptive Message Passing for Unbiased Scene Graph Generation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900l1104/1yeJ9bKPSqQ",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900l1541",
"title": "Fully Convolutional Scene Graph Generation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900l1541/1yeJJIVP3bi",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
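The abstract above describes expressing a target object's mask as a lingual-similarity weighted linear combination over categories annotated in an auxiliary dataset. A toy NumPy sketch of that weighting idea follows; the embeddings, category names, and masks are fabricated for illustration, and the softmax over cosine similarities is an assumed choice of weighting, not a detail taken from the paper.

```python
# Sketch of a lingual-similarity weighted combination of auxiliary masks.
import numpy as np

def softmax(x):
    e = np.exp(x - x.max())
    return e / e.sum()

def cosine(a, b):
    return a @ b / (np.linalg.norm(a) * np.linalg.norm(b))

# Hypothetical word embeddings for three auxiliary categories (e.g., MS COCO).
aux_emb = np.array([[0.9, 0.1, 0.0],   # "dog"
                    [0.8, 0.3, 0.1],   # "cat"
                    [0.0, 0.1, 0.9]])  # "car"
target_emb = np.array([0.85, 0.2, 0.05])  # target category absent from the aux set

# Per-category soft masks from the auxiliary segmenter, each HxW in [0, 1].
aux_masks = np.random.rand(3, 8, 8)

weights = softmax(np.array([cosine(target_emb, e) for e in aux_emb]))
target_mask = np.tensordot(weights, aux_masks, axes=1)  # weighted combination
print(weights.round(3), target_mask.shape)
```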
{
"proceeding": {
"id": "1BmEezmpGrm",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1BmJJPy8ySA",
"doi": "10.1109/ICCV48922.2021.01073",
"title": "Hypersim: A Photorealistic Synthetic Dataset for Holistic Indoor Scene Understanding",
"normalizedTitle": "Hypersim: A Photorealistic Synthetic Dataset for Holistic Indoor Scene Understanding",
"abstract": "For many fundamental scene understanding tasks, it is difficult or impossible to obtain per-pixel ground truth labels from real images. We address this challenge by introducing Hypersim, a photorealistic synthetic dataset for holistic indoor scene understanding. To create our dataset, we leverage a large repository of synthetic scenes created by professional artists, and we generate 77,400 images of 461 indoor scenes with detailed per-pixel labels and corresponding ground truth geometry. Our dataset: (1) relies exclusively on publicly available 3D assets; (2) includes complete scene geometry, material information, and lighting information for every scene; (3) includes dense per-pixel semantic instance segmentations and complete camera information for every image; and (4) factors every image into diffuse reflectance, diffuse illumination, and a non-diffuse residual term that captures view-dependent lighting effects.We analyze our dataset at the level of scenes, objects, and pixels, and we analyze costs in terms of money, computation time, and annotation effort. Remarkably, we find that it is possible to generate our entire dataset from scratch, for roughly half the cost of training a popular open-source natural language processing model. We also evaluate sim-to-real transfer performance on two real-world scene understanding tasks – semantic segmentation and 3D shape prediction – where we find that pre-training on our dataset significantly improves performance on both tasks, and achieves state-of-the-art performance on the most challenging Pix3D test set. All of our rendered image data, as well as all the code we used to generate our dataset and perform our experiments, is available online.",
"abstracts": [
{
"abstractType": "Regular",
"content": "For many fundamental scene understanding tasks, it is difficult or impossible to obtain per-pixel ground truth labels from real images. We address this challenge by introducing Hypersim, a photorealistic synthetic dataset for holistic indoor scene understanding. To create our dataset, we leverage a large repository of synthetic scenes created by professional artists, and we generate 77,400 images of 461 indoor scenes with detailed per-pixel labels and corresponding ground truth geometry. Our dataset: (1) relies exclusively on publicly available 3D assets; (2) includes complete scene geometry, material information, and lighting information for every scene; (3) includes dense per-pixel semantic instance segmentations and complete camera information for every image; and (4) factors every image into diffuse reflectance, diffuse illumination, and a non-diffuse residual term that captures view-dependent lighting effects.We analyze our dataset at the level of scenes, objects, and pixels, and we analyze costs in terms of money, computation time, and annotation effort. Remarkably, we find that it is possible to generate our entire dataset from scratch, for roughly half the cost of training a popular open-source natural language processing model. We also evaluate sim-to-real transfer performance on two real-world scene understanding tasks – semantic segmentation and 3D shape prediction – where we find that pre-training on our dataset significantly improves performance on both tasks, and achieves state-of-the-art performance on the most challenging Pix3D test set. All of our rendered image data, as well as all the code we used to generate our dataset and perform our experiments, is available online.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "For many fundamental scene understanding tasks, it is difficult or impossible to obtain per-pixel ground truth labels from real images. We address this challenge by introducing Hypersim, a photorealistic synthetic dataset for holistic indoor scene understanding. To create our dataset, we leverage a large repository of synthetic scenes created by professional artists, and we generate 77,400 images of 461 indoor scenes with detailed per-pixel labels and corresponding ground truth geometry. Our dataset: (1) relies exclusively on publicly available 3D assets; (2) includes complete scene geometry, material information, and lighting information for every scene; (3) includes dense per-pixel semantic instance segmentations and complete camera information for every image; and (4) factors every image into diffuse reflectance, diffuse illumination, and a non-diffuse residual term that captures view-dependent lighting effects.We analyze our dataset at the level of scenes, objects, and pixels, and we analyze costs in terms of money, computation time, and annotation effort. Remarkably, we find that it is possible to generate our entire dataset from scratch, for roughly half the cost of training a popular open-source natural language processing model. We also evaluate sim-to-real transfer performance on two real-world scene understanding tasks – semantic segmentation and 3D shape prediction – where we find that pre-training on our dataset significantly improves performance on both tasks, and achieves state-of-the-art performance on the most challenging Pix3D test set. All of our rendered image data, as well as all the code we used to generate our dataset and perform our experiments, is available online.",
"fno": "281200k0892",
"keywords": [
"Training",
"Computer Vision",
"Image Segmentation",
"Costs",
"Three Dimensional Displays",
"Computational Modeling",
"Semantics",
"Datasets And Evaluation",
"3 D From A Single Image And Shape From X",
"Scene Analysis And Understanding"
],
"authors": [
{
"affiliation": "Apple",
"fullName": "Mike Roberts",
"givenName": "Mike",
"surname": "Roberts",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Apple",
"fullName": "Jason Ramapuram",
"givenName": "Jason",
"surname": "Ramapuram",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Apple",
"fullName": "Anurag Ranjan",
"givenName": "Anurag",
"surname": "Ranjan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Apple",
"fullName": "Atulit Kumar",
"givenName": "Atulit",
"surname": "Kumar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Apple",
"fullName": "Miguel Angel Bautista",
"givenName": "Miguel Angel",
"surname": "Bautista",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Apple",
"fullName": "Nathan Paczan",
"givenName": "Nathan",
"surname": "Paczan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Apple",
"fullName": "Russ Webb",
"givenName": "Russ",
"surname": "Webb",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Apple",
"fullName": "Joshua M. Susskind",
"givenName": "Joshua M.",
"surname": "Susskind",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "10892-10902",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-2812-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "281200k0881",
"articleId": "1BmKIhRemJ2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "281200k0903",
"articleId": "1BmGMUeU5UY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2017/0457/0/0457f122",
"title": "Scene Parsing through ADE20K Dataset",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457f122/12OmNBO3Kba",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032c697",
"title": "SceneNet RGB-D: Can 5M Synthetic Images Beat Generic ImageNet Pre-training on Indoor Segmentation?",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032c697/12OmNwdL7kX",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/07780811",
"title": "Understanding RealWorld Indoor Scenes with Synthetic Data",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/07780811/12OmNxE2mKD",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10050341",
"title": "GeoSynth: A Photorealistic Synthetic Indoor Dataset for Scene Understanding",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10050341/1KYorVtFExW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/10098665",
"title": "Temporal Pixel-Level Semantic Understanding Through the VSPW Dataset",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/10098665/1Mg6cl33U2I",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2019/2506/0/250600a475",
"title": "SIDOD: A Synthetic Image Dataset for 3D Object Pose Recognition With Distractors",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2019/250600a475/1iTvum5DfXy",
"parentPublication": {
"id": "proceedings/cvprw/2019/2506/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093563",
"title": "Learning from THEODORE: A Synthetic Omnidirectional Top-View Indoor Dataset for Deep Transfer Learning",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093563/1jPbCpUwBNK",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2021/0477/0/047700b578",
"title": "EDEN: Multimodal Synthetic Dataset of Enclosed GarDEN Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2021/047700b578/1uqGMNvRXNe",
"parentPublication": {
"id": "proceedings/wacv/2021/0477/0",
"title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2021/0191/0/019100c951",
"title": "RaidaR: A Rich Annotated Image Dataset of Rainy Street Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2021/019100c951/1yNhZNWtnu8",
"parentPublication": {
"id": "proceedings/iccvw/2021/0191/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900h186",
"title": "OpenRooms: An Open Framework for Photorealistic Indoor Scene Datasets",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900h186/1yeImiGYbbG",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
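The Hypersim abstract above says every image is factored into diffuse reflectance, diffuse illumination, and a non-diffuse residual capturing view-dependent effects. The natural elementwise recombination implied by that wording, image ≈ reflectance * illumination + residual, can be checked in a few lines; treating this as the dataset's exact convention is an assumption, not a verified detail of the released data.

```python
# Sketch of the reflectance / illumination / residual factorization above,
# recombined elementwise (an assumption consistent with the abstract's wording).
import numpy as np

h, w = 4, 4
reflectance = np.random.rand(h, w, 3)      # albedo-like per-pixel RGB
illumination = np.random.rand(h, w, 3)     # diffuse shading
residual = 0.1 * np.random.rand(h, w, 3)   # specular / view-dependent term

image = reflectance * illumination + residual
recovered = image - reflectance * illumination  # residual is what the diffuse product leaves over
assert np.allclose(recovered, residual)
```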
{
"proceeding": {
"id": "1IHotVZum6Q",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"acronym": "icpr",
"groupId": "9956007",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1IHoKvQ8Kf6",
"doi": "10.1109/ICPR56361.2022.9956064",
"title": "ColorWater: A Diverse Dataset and Benchmark for Semantic Water Surface Understanding",
"normalizedTitle": "ColorWater: A Diverse Dataset and Benchmark for Semantic Water Surface Understanding",
"abstract": "Intelligent water environment monitoring has received global attention. Providing researchers with a water surface dataset with sufficient data and diversity is an important support for training and evaluating the water surface understanding model. However, current water surface datasets lack high-quality and rich annotations of various floating objects on real-world water surfaces, resulting in a partial understanding of the water surface by existing intelligent monitoring methods of the water environment. To address this, we present ColorWater, a diverse semantic segmentation dataset tailored for comprehensively understanding the complex water surface. ColorWater contains 1279 complex water images and pixel-level annotations consisting of 9 categories and 10942 objects and stuff. Our effort exceeds previous datasets in terms of annotation richness and scene complexity. In ColorWater, there exists scale diversity in which the small objects are the majority. Thus, we propose a new global evaluation metric, SWIoU (Scale Weight IoU), to improve the sensitivity of the global metric for the segmentation of differentscale objects and reasonably evaluate the global performance of semantic water surface segmentation models. We further provide an in-depth analysis of annotation inconsistency and dataset’s characteristics. Moreover, we also build a benchmark evaluation of semantic segmentation methods in the ColorWater. Our dataset is publicly available at https://github.com/L-cuixiao/ColorWater.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Intelligent water environment monitoring has received global attention. Providing researchers with a water surface dataset with sufficient data and diversity is an important support for training and evaluating the water surface understanding model. However, current water surface datasets lack high-quality and rich annotations of various floating objects on real-world water surfaces, resulting in a partial understanding of the water surface by existing intelligent monitoring methods of the water environment. To address this, we present ColorWater, a diverse semantic segmentation dataset tailored for comprehensively understanding the complex water surface. ColorWater contains 1279 complex water images and pixel-level annotations consisting of 9 categories and 10942 objects and stuff. Our effort exceeds previous datasets in terms of annotation richness and scene complexity. In ColorWater, there exists scale diversity in which the small objects are the majority. Thus, we propose a new global evaluation metric, SWIoU (Scale Weight IoU), to improve the sensitivity of the global metric for the segmentation of differentscale objects and reasonably evaluate the global performance of semantic water surface segmentation models. We further provide an in-depth analysis of annotation inconsistency and dataset’s characteristics. Moreover, we also build a benchmark evaluation of semantic segmentation methods in the ColorWater. Our dataset is publicly available at https://github.com/L-cuixiao/ColorWater.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Intelligent water environment monitoring has received global attention. Providing researchers with a water surface dataset with sufficient data and diversity is an important support for training and evaluating the water surface understanding model. However, current water surface datasets lack high-quality and rich annotations of various floating objects on real-world water surfaces, resulting in a partial understanding of the water surface by existing intelligent monitoring methods of the water environment. To address this, we present ColorWater, a diverse semantic segmentation dataset tailored for comprehensively understanding the complex water surface. ColorWater contains 1279 complex water images and pixel-level annotations consisting of 9 categories and 10942 objects and stuff. Our effort exceeds previous datasets in terms of annotation richness and scene complexity. In ColorWater, there exists scale diversity in which the small objects are the majority. Thus, we propose a new global evaluation metric, SWIoU (Scale Weight IoU), to improve the sensitivity of the global metric for the segmentation of differentscale objects and reasonably evaluate the global performance of semantic water surface segmentation models. We further provide an in-depth analysis of annotation inconsistency and dataset’s characteristics. Moreover, we also build a benchmark evaluation of semantic segmentation methods in the ColorWater. Our dataset is publicly available at https://github.com/L-cuixiao/ColorWater.",
"fno": "09956064",
"keywords": [
"Environmental Monitoring Geophysics",
"Geophysical Image Processing",
"Hydrological Techniques",
"Image Annotation",
"Image Colour Analysis",
"Image Segmentation",
"Annotation Inconsistency",
"Benchmark Evaluation",
"Complex Water Images",
"Complex Water Surface",
"Current Water Surface Datasets",
"Dataset Characteristics",
"Diverse Dataset",
"Diverse Semantic Segmentation Dataset",
"Floating Object Annotation",
"Global Attention",
"Global Evaluation Metric",
"Global Performance Evaluation",
"Intelligent Monitoring Methods",
"Intelligent Water Environment Monitoring",
"Pixel Level Annotations",
"Real World Water Surfaces",
"Scale Diversity",
"Scale Object Segmentation",
"Scale Weight Io U",
"Scene Complexity",
"Semantic Segmentation Methods",
"Semantic Water Surface Segmentation Models",
"Semantic Water Surface Understanding Model",
"SW Io U",
"Water Surface Dataset",
"Water",
"Training",
"Sensitivity",
"Annotations",
"Semantic Segmentation",
"Semantics",
"Benchmark Testing"
],
"authors": [
{
"affiliation": "South China University of Technology,Guangzhou,China",
"fullName": "Cuixiao Liang",
"givenName": "Cuixiao",
"surname": "Liang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shunfeng Technology (Shenzhen) Co., Ltd,Shenzhen,China",
"fullName": "Wenjie Cai",
"givenName": "Wenjie",
"surname": "Cai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "South China University of Technology,Guangzhou,China",
"fullName": "Qiong Liu",
"givenName": "Qiong",
"surname": "Liu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-08-01T00:00:00",
"pubType": "proceedings",
"pages": "3743-3749",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-9062-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09956121",
"articleId": "1IHpDkO8d0I",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09956612",
"articleId": "1IHqDsIwTh6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/isise/2008/3494/1/3494a691",
"title": "Interactive Dynamic Water Surface Fast Rendering Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/isise/2008/3494a691/12OmNBTawt6",
"parentPublication": {
"id": "proceedings/isise/2008/3494/1",
"title": "2008 International Symposium on Information Science and Engieering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isdea/2013/4893/0/06455238",
"title": "The Surface Water Vulnerability Assessment of Arid Pastoral Areas",
"doi": null,
"abstractUrl": "/proceedings-article/isdea/2013/06455238/12OmNC8MsMI",
"parentPublication": {
"id": "proceedings/isdea/2013/4893/0",
"title": "2013 Third International Conference on Intelligent System Design and Engineering Applications (ISDEA 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvmp/2011/4621/0/4621a109",
"title": "Realtime Video Based Water Surface Approximation",
"doi": null,
"abstractUrl": "/proceedings-article/cvmp/2011/4621a109/12OmNscxj23",
"parentPublication": {
"id": "proceedings/cvmp/2011/4621/0",
"title": "2011 Conference for Visual Media Production",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2013/4932/0/4932a291",
"title": "Application of Fuzzy C-means Clustering for Assessing Rural Surface Water Quality in Lianyungang City",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2013/4932a291/12OmNvyjGg2",
"parentPublication": {
"id": "proceedings/icmtma/2013/4932/0",
"title": "2013 Fifth International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isdea/2013/4893/0/06456676",
"title": "Characteristics and Evaluation for Nitrogen Pollution in Water and Surface Sediments of Xixi Wetland",
"doi": null,
"abstractUrl": "/proceedings-article/isdea/2013/06456676/12OmNyL0TFJ",
"parentPublication": {
"id": "proceedings/isdea/2013/4893/0",
"title": "2013 Third International Conference on Intelligent System Design and Engineering Applications (ISDEA 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/esiat/2009/3682/1/3682a179",
"title": "Study on Retardation of Water Evaporation by Single and Mixed Monolayers at Air/Water Surface",
"doi": null,
"abstractUrl": "/proceedings-article/esiat/2009/3682a179/12OmNzRHOOU",
"parentPublication": {
"id": "proceedings/esiat/2009/3682/1",
"title": "Environmental Science and Information Application Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/08/05999663",
"title": "A Deformable Surface Model for Real-Time Water Drop Animation",
"doi": null,
"abstractUrl": "/journal/tg/2012/08/05999663/13rRUyY28Ys",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200k0933",
"title": "FloW: A Dataset and Benchmark for Floating Waste Detection in Inland Waters",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200k0933/1BmFV0gjois",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icenit/2022/6307/0/630700a311",
"title": "Application Research of Computer Artificial Intelligence Monitoring System in Surface Water Quality Measurement of Water Conservancy Industry",
"doi": null,
"abstractUrl": "/proceedings-article/icenit/2022/630700a311/1KCSFeyFh5K",
"parentPublication": {
"id": "proceedings/icenit/2022/6307/0",
"title": "2022 International Conference on Education, Network and Information Technology (ICENIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ithings-greencom-cpscom-smartdata/2019/2980/0/298000a675",
"title": "An Improved Otsu Method Based on Uniformity Measurement for Segmentation of Water Surface Images",
"doi": null,
"abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata/2019/298000a675/1ehBKbmDmg0",
"parentPublication": {
"id": "proceedings/ithings-greencom-cpscom-smartdata/2019/2980/0",
"title": "2019 International Conference on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
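The ColorWater record above introduces SWIoU (Scale Weight IoU) to make the global metric more sensitive to small objects, but the abstract does not give the formula. The sketch below therefore illustrates the general idea only: per-class IoU weighted by inverse relative object scale, then normalized. The weighting scheme is an assumption for illustration, not the paper's definition.

```python
# Illustrative scale-weighted IoU; the exact SWIoU formula is not in the abstract.
import numpy as np

def iou(pred, gt, cls):
    p, g = pred == cls, gt == cls
    union = np.logical_or(p, g).sum()
    return np.logical_and(p, g).sum() / union if union else np.nan

def scale_weighted_iou(pred, gt, classes):
    ious, weights = [], []
    for c in classes:
        v = iou(pred, gt, c)
        if np.isnan(v):
            continue  # class absent from both prediction and ground truth
        scale = (gt == c).mean()              # fraction of pixels as a scale proxy
        ious.append(v)
        weights.append(1.0 / (scale + 1e-6))  # emphasize small objects
    w = np.asarray(weights) / np.sum(weights)
    return float(np.dot(w, ious))

gt = np.random.randint(0, 3, (32, 32))
noise = np.random.randint(0, 3, (32, 32))
pred = np.where(np.random.rand(32, 32) < 0.8, gt, noise)  # mostly-correct prediction
print(scale_weighted_iou(pred, gt, classes=range(3)))
```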
{
"proceeding": {
"id": "1KxUhhFgzlK",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"acronym": "wacv",
"groupId": "1000040",
"volume": "0",
"displayVolume": "0",
"year": "2023",
"__typename": "ProceedingType"
},
"article": {
"id": "1KxVhi7yhR6",
"doi": "10.1109/WACV56688.2023.00086",
"title": "Beyond RGB: Scene-Property Synthesis with Neural Radiance Fields",
"normalizedTitle": "Beyond RGB: Scene-Property Synthesis with Neural Radiance Fields",
"abstract": "Comprehensive 3D scene understanding, both geometrically and semantically, is important for real-world applications such as robot perception. Most of the existing work has focused on developing data-driven discriminative models for scene understanding. This paper provides a new approach to scene understanding, from a synthesis model perspective, by leveraging the recent progress on implicit scene representation and neural rendering. Building upon the great success of Neural Radiance Fields (NeRFs), we introduce Scene-Property Synthesis with NeRF (SS-NeRF) that is able to not only render photo-realistic RGB images from novel viewpoints, but also render various accurate scene properties (e.g., appearance, geometry, and semantics). By doing so, we facilitate addressing a variety of scene understanding tasks under a unified framework, including semantic segmentation, surface normal estimation, reshading, keypoint detection, and edge detection. Our SS-NeRF framework can be a powerful tool for bridging generative learning and discriminative learning, and thus be beneficial to the investigation of a wide range of interesting problems, such as studying task relationships within a synthesis paradigm, transferring knowledge to novel tasks, facilitating downstream discriminative tasks as ways of data augmentation, and serving as auto-labeller for data creation. Our code is available at https://github.com/zsh2000/SS-NeRF.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Comprehensive 3D scene understanding, both geometrically and semantically, is important for real-world applications such as robot perception. Most of the existing work has focused on developing data-driven discriminative models for scene understanding. This paper provides a new approach to scene understanding, from a synthesis model perspective, by leveraging the recent progress on implicit scene representation and neural rendering. Building upon the great success of Neural Radiance Fields (NeRFs), we introduce Scene-Property Synthesis with NeRF (SS-NeRF) that is able to not only render photo-realistic RGB images from novel viewpoints, but also render various accurate scene properties (e.g., appearance, geometry, and semantics). By doing so, we facilitate addressing a variety of scene understanding tasks under a unified framework, including semantic segmentation, surface normal estimation, reshading, keypoint detection, and edge detection. Our SS-NeRF framework can be a powerful tool for bridging generative learning and discriminative learning, and thus be beneficial to the investigation of a wide range of interesting problems, such as studying task relationships within a synthesis paradigm, transferring knowledge to novel tasks, facilitating downstream discriminative tasks as ways of data augmentation, and serving as auto-labeller for data creation. Our code is available at https://github.com/zsh2000/SS-NeRF.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Comprehensive 3D scene understanding, both geometrically and semantically, is important for real-world applications such as robot perception. Most of the existing work has focused on developing data-driven discriminative models for scene understanding. This paper provides a new approach to scene understanding, from a synthesis model perspective, by leveraging the recent progress on implicit scene representation and neural rendering. Building upon the great success of Neural Radiance Fields (NeRFs), we introduce Scene-Property Synthesis with NeRF (SS-NeRF) that is able to not only render photo-realistic RGB images from novel viewpoints, but also render various accurate scene properties (e.g., appearance, geometry, and semantics). By doing so, we facilitate addressing a variety of scene understanding tasks under a unified framework, including semantic segmentation, surface normal estimation, reshading, keypoint detection, and edge detection. Our SS-NeRF framework can be a powerful tool for bridging generative learning and discriminative learning, and thus be beneficial to the investigation of a wide range of interesting problems, such as studying task relationships within a synthesis paradigm, transferring knowledge to novel tasks, facilitating downstream discriminative tasks as ways of data augmentation, and serving as auto-labeller for data creation. Our code is available at https://github.com/zsh2000/SS-NeRF.",
"fno": "934600a795",
"keywords": [
"Data Augmentation",
"Edge Detection",
"Feature Extraction",
"Image Colour Analysis",
"Image Representation",
"Image Segmentation",
"Learning Artificial Intelligence",
"Rendering Computer Graphics",
"Solid Modelling",
"Accurate Scene Properties",
"Data Driven Discriminative Models",
"Implicit Scene Representation",
"Neural Radiance Fields",
"Neural Rendering",
"Render Photo Realistic RGB Images",
"Scene Understanding Tasks",
"Scene Property Synthesis",
"SS Ne RF Framework",
"Synthesis Model Perspective",
"Synthesis Paradigm",
"Geometry",
"Visualization",
"Three Dimensional Displays",
"Computational Modeling",
"Semantic Segmentation",
"Semantics",
"Rendering Computer Graphics",
"Algorithms Computational Photography",
"Image And Video Synthesis",
"Image Recognition And Understanding Object Detection",
"Categorization",
"Segmentation",
"Scene Modeling",
"Visual Reasoning"
],
"authors": [
{
"affiliation": "University of Illinois Urbana-Champaign",
"fullName": "Mingtong Zhang",
"givenName": "Mingtong",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Illinois Urbana-Champaign",
"fullName": "Shuhong Zheng",
"givenName": "Shuhong",
"surname": "Zheng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Carnegie Mellon University",
"fullName": "Zhipeng Bao",
"givenName": "Zhipeng",
"surname": "Bao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Carnegie Mellon University",
"fullName": "Martial Hebert",
"givenName": "Martial",
"surname": "Hebert",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Illinois Urbana-Champaign",
"fullName": "Yu-Xiong Wang",
"givenName": "Yu-Xiong",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wacv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2023-01-01T00:00:00",
"pubType": "proceedings",
"pages": "795-805",
"year": "2023",
"issn": null,
"isbn": "978-1-6654-9346-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1KxVhcf5kQg",
"name": "pwacv202393460-010030817s1-mm_934600a795.zip",
"size": "18 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pwacv202393460-010030817s1-mm_934600a795.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "934600a785",
"articleId": "1KxVq4DixWg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "934600a806",
"articleId": "1KxUHyXEN9u",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2021/2812/0/281200f845",
"title": "Nerfies: Deformable Neural Radiance Fields",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200f845/1BmL0KETWzm",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600s8332",
"title": "NeRF-Editing: Geometry Editing of Neural Radiance Fields",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600s8332/1H0Nn4Xgsne",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600f460",
"title": "Mip-NeRF 360: Unbounded Anti-Aliased Neural Radiance Fields",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600f460/1H0OphoghaM",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600m2892",
"title": "EfficientNeRF - Efficient Neural Radiance Fields",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600m2892/1H0OvIHTU7S",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600f470",
"title": "RegNeRF: Regularizing Neural Radiance Fields for View Synthesis from Sparse Inputs",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600f470/1H1mpdxQEq4",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600f428",
"title": "Point-NeRF: Point-based Neural Radiance Fields",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600f428/1H1mrGLgvra",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600s8388",
"title": "NeRFReN: Neural Radiance Fields with Reflections",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600s8388/1H1nhdo3vFe",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/09999509",
"title": "Neural Radiance Fields from Sparse RGB-D Images for High-Quality View Synthesis",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/09999509/1JrMA4xh8o8",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900k0313",
"title": "D-NeRF: Neural Radiance Fields for Dynamic Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900k0313/1yeLrBwGgik",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2021/2688/0/268800a972",
"title": "Stochastic Neural Radiance Fields: Quantifying Uncertainty in Implicit 3D Representations",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2021/268800a972/1zWEo0mrRzG",
"parentPublication": {
"id": "proceedings/3dv/2021/2688/0",
"title": "2021 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
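The SS-NeRF abstract above describes a single radiance field whose volume-rendering weights composite not just RGB but additional scene properties. As a hedged illustration (not the authors' code; the layer sizes, the semantic-head layout, and the omission of view-direction conditioning are all simplifying assumptions), the following PyTorch sketch shows how one density field can drive compositing of both color and semantic logits along a ray:

```python
# Minimal sketch of a multi-head neural field: one trunk, one density head,
# and separate heads for appearance (RGB) and semantics, composited with the
# same volume-rendering weights. Sizes and the 13-class head are assumptions.
import torch
import torch.nn as nn

class ScenePropertyField(nn.Module):
    def __init__(self, num_classes: int = 13, hidden: int = 128):
        super().__init__()
        self.trunk = nn.Sequential(
            nn.Linear(3, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
        )
        self.sigma_head = nn.Linear(hidden, 1)          # volume density
        self.rgb_head = nn.Linear(hidden, 3)            # appearance
        self.sem_head = nn.Linear(hidden, num_classes)  # semantic logits

    def forward(self, xyz):
        h = self.trunk(xyz)
        return (torch.relu(self.sigma_head(h)),
                torch.sigmoid(self.rgb_head(h)),
                self.sem_head(h))

def composite(sigma, values, deltas):
    """Standard NeRF alpha compositing, reused for every scene property."""
    alpha = 1.0 - torch.exp(-sigma * deltas)            # (rays, samples, 1)
    trans = torch.cumprod(torch.cat([torch.ones_like(alpha[:, :1]),
                                     1.0 - alpha + 1e-10], dim=1), dim=1)[:, :-1]
    weights = alpha * trans
    return (weights * values).sum(dim=1)

# Usage: sample points along 4 rays, query the field once, composite each property.
field = ScenePropertyField()
xyz = torch.rand(4, 64, 3)                  # 4 rays, 64 samples each
deltas = torch.full((4, 64, 1), 0.02)       # sample spacing along each ray
sigma, rgb, sem = field(xyz.reshape(-1, 3))
rgb_img = composite(sigma.reshape(4, 64, 1), rgb.reshape(4, 64, 3), deltas)
sem_img = composite(sigma.reshape(4, 64, 1), sem.reshape(4, 64, -1), deltas)
```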
{
"proceeding": {
"id": "1KxUhhFgzlK",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"acronym": "wacv",
"groupId": "1000040",
"volume": "0",
"displayVolume": "0",
"year": "2023",
"__typename": "ProceedingType"
},
"article": {
"id": "1L8qmg37qxy",
"doi": "10.1109/WACV56688.2023.00592",
"title": "Urban Scene Semantic Segmentation with Low-Cost Coarse Annotation",
"normalizedTitle": "Urban Scene Semantic Segmentation with Low-Cost Coarse Annotation",
"abstract": "For best performance, today’s semantic segmentation methods use large and carefully labeled datasets, requiring expensive annotation budgets. In this work, we show that coarse annotation is a low-cost but highly effective alternative for training semantic segmentation models. Considering the urban scene segmentation scenario, we lever-age cheap coarse annotations for real-world captured data, as well as synthetic data to train our model and show competitive performance compared with finely annotated real-world data. Specifically, we propose a coarse-to-fine self-training framework that generates pseudo labels for unlabeled regions of the coarsely annotated data, using synthetic data to improve predictions around the boundaries between semantic classes, and using cross-domain data augmentation to increase diversity. Our extensive experimental results on Cityscapes and BDD100k datasets demonstrate that our method achieves a significantly better performance vs annotation cost tradeoff, yielding a comparable performance to fully annotated data with only a small fraction of the annotation budget. Also, when used as pre-training, our framework performs better compared to the standard fully supervised setting.",
"abstracts": [
{
"abstractType": "Regular",
"content": "For best performance, today’s semantic segmentation methods use large and carefully labeled datasets, requiring expensive annotation budgets. In this work, we show that coarse annotation is a low-cost but highly effective alternative for training semantic segmentation models. Considering the urban scene segmentation scenario, we lever-age cheap coarse annotations for real-world captured data, as well as synthetic data to train our model and show competitive performance compared with finely annotated real-world data. Specifically, we propose a coarse-to-fine self-training framework that generates pseudo labels for unlabeled regions of the coarsely annotated data, using synthetic data to improve predictions around the boundaries between semantic classes, and using cross-domain data augmentation to increase diversity. Our extensive experimental results on Cityscapes and BDD100k datasets demonstrate that our method achieves a significantly better performance vs annotation cost tradeoff, yielding a comparable performance to fully annotated data with only a small fraction of the annotation budget. Also, when used as pre-training, our framework performs better compared to the standard fully supervised setting.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "For best performance, today’s semantic segmentation methods use large and carefully labeled datasets, requiring expensive annotation budgets. In this work, we show that coarse annotation is a low-cost but highly effective alternative for training semantic segmentation models. Considering the urban scene segmentation scenario, we lever-age cheap coarse annotations for real-world captured data, as well as synthetic data to train our model and show competitive performance compared with finely annotated real-world data. Specifically, we propose a coarse-to-fine self-training framework that generates pseudo labels for unlabeled regions of the coarsely annotated data, using synthetic data to improve predictions around the boundaries between semantic classes, and using cross-domain data augmentation to increase diversity. Our extensive experimental results on Cityscapes and BDD100k datasets demonstrate that our method achieves a significantly better performance vs annotation cost tradeoff, yielding a comparable performance to fully annotated data with only a small fraction of the annotation budget. Also, when used as pre-training, our framework performs better compared to the standard fully supervised setting.",
"fno": "934600f967",
"keywords": [
"Data Augmentation",
"Image Segmentation",
"Learning Artificial Intelligence",
"Object Detection",
"Supervised Learning",
"Annotation Budget",
"Annotation Cost Tradeoff",
"Coarse To Fine Self Training Framework",
"Coarsely Annotated Data",
"Competitive Performance",
"Cross Domain Data Augmentation",
"Expensive Annotation Budgets",
"Fully Annotated Data",
"Highly Effective Alternative",
"Lever Age Cheap Coarse Annotations",
"Low Cost Coarse Annotation",
"Pseudolabels",
"Real World Captured Data",
"Real World Data",
"Semantic Classes",
"Semantic Segmentation Methods",
"Synthetic Data",
"Training Semantic Segmentation Models",
"Urban Scene Segmentation Scenario",
"Urban Scene Semantic Segmentation",
"Training",
"Computer Vision",
"Costs",
"Annotations",
"Semantic Segmentation",
"Semantics",
"Data Models",
"Algorithms Image Recognition And Understanding Object Detection",
"Categorization",
"Segmentation"
],
"authors": [
{
"affiliation": "MPI for Informatics",
"fullName": "Anurag Das",
"givenName": "Anurag",
"surname": "Das",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ETH Zürich",
"fullName": "Yongqin Xian",
"givenName": "Yongqin",
"surname": "Xian",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "CISPA",
"fullName": "Yang He",
"givenName": "Yang",
"surname": "He",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "MPI for Intelligent Systems",
"fullName": "Zeynep Akata",
"givenName": "Zeynep",
"surname": "Akata",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "MPI for Informatics",
"fullName": "Bernt Schiele",
"givenName": "Bernt",
"surname": "Schiele",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wacv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2023-01-01T00:00:00",
"pubType": "proceedings",
"pages": "5967-5976",
"year": "2023",
"issn": null,
"isbn": "978-1-6654-9346-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "934600f955",
"articleId": "1KxUO7w4eKA",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "934600f977",
"articleId": "1KxVgyRFr5m",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ism/2008/3454/0/3454a509",
"title": "Multimedia Semantic Annotation Propagation",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2008/3454a509/12OmNzgwmKC",
"parentPublication": {
"id": "proceedings/ism/2008/3454/0",
"title": "2008 Tenth IEEE International Symposium on Multimedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/12/08115231",
"title": "A Robust 3D-2D Interactive Tool for Scene Segmentation and Annotation",
"doi": null,
"abstractUrl": "/journal/tg/2018/12/08115231/14H4WMh20es",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ai/2023/02/09699409",
"title": "Annotation-Efficient COVID-19 Pneumonia Lesion Segmentation Using Error-Aware Unified Semisupervised and Active Learning",
"doi": null,
"abstractUrl": "/journal/ai/2023/02/09699409/1ADJimXQt0Y",
"parentPublication": {
"id": "trans/ai",
"title": "IEEE Transactions on Artificial Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200p5344",
"title": "Scaling up instance annotation via label propagation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200p5344/1BmFAQmevn2",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600e858",
"title": "Object Localization under Single Coarse Point Supervision",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600e858/1H1lTC1rnAQ",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600f893",
"title": "Reducing Annotation Effort by Identifying and Labeling Contextually Diverse Classes for Semantic Segmentation Under Domain Shift",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600f893/1L6LFv8kB6U",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600f870",
"title": "Human-in-the-Loop Video Semantic Segmentation Auto-Annotation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600f870/1LiO7MVX61O",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300f289",
"title": "Block Annotation: Better Image Annotation With Sub-Image Decomposition",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300f289/1hVlNOjUXO8",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800i560",
"title": "Boosting Semantic Human Matting With Coarse Annotations",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800i560/1m3nGi8GNdC",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/4.509E55",
"title": "Coarse-to-Fine Domain Adaptive Semantic Segmentation with Photometric Alignment and Category-Center Regularization",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/4.509E55/1yeLU47cKSA",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
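The coarse-to-fine self-training step in the abstract above, which generates pseudo labels only for the unlabeled regions of a coarsely annotated image, can be sketched as follows. This is a minimal illustration under assumed conventions (ignore index 255, a fixed softmax confidence threshold), not the paper's implementation:

```python
# Pseudo-labelling sketch: pixels the coarse annotation leaves unlabeled
# receive the model's prediction, but only where the prediction is confident;
# everything else stays at the ignore index and contributes no loss.
import torch
import torch.nn.functional as F

IGNORE = 255  # assumed ignore index for unlabeled regions

def refine_coarse_labels(logits: torch.Tensor,
                         coarse: torch.Tensor,
                         conf_thresh: float = 0.9) -> torch.Tensor:
    """logits: (C, H, W) model output; coarse: (H, W) coarse annotation."""
    prob = F.softmax(logits, dim=0)
    conf, pred = prob.max(dim=0)                 # per-pixel confidence + class
    pseudo = coarse.clone()
    unlabeled = coarse == IGNORE
    fill = unlabeled & (conf >= conf_thresh)     # confident AND unlabeled
    pseudo[fill] = pred[fill]
    return pseudo

# Usage on dummy data: 19 Cityscapes-style classes on a 4x4 crop.
logits = torch.randn(19, 4, 4)
coarse = torch.full((4, 4), IGNORE, dtype=torch.long)
coarse[0, :] = 3                                 # one coarsely drawn region
print(refine_coarse_labels(logits, coarse))
```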
{
"proceeding": {
"id": "12OmNxV4itF",
"title": "2017 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCmGO1G",
"doi": "10.1109/VR.2017.7892339",
"title": "Simulating anthropomorphic upper body actions in virtual reality using head and hand motion data",
"normalizedTitle": "Simulating anthropomorphic upper body actions in virtual reality using head and hand motion data",
"abstract": "The use of self avatars in virtual reality (VR) can bring users a stronger sense of presence and produce a more compelling experience by providing additional visual feedback during interactions. Avatars also become increasingly more relevant in VR as they provide a user with an identity for social interactions in multi-user settings. However, with current consumer VR setups that include only a head mounted display and hand controllers, implementation of self avatars are generally limited in the ability to mimic actions performed in the real world. Our work explores the idea of simulating a wide range of upper body motions using motion and positional data from only the head and hand motion data. We present a method to differentiate head and hip motions using information from captured motion data and applying corresponding changes to a virtual avatar. We discuss our approach and initial results.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The use of self avatars in virtual reality (VR) can bring users a stronger sense of presence and produce a more compelling experience by providing additional visual feedback during interactions. Avatars also become increasingly more relevant in VR as they provide a user with an identity for social interactions in multi-user settings. However, with current consumer VR setups that include only a head mounted display and hand controllers, implementation of self avatars are generally limited in the ability to mimic actions performed in the real world. Our work explores the idea of simulating a wide range of upper body motions using motion and positional data from only the head and hand motion data. We present a method to differentiate head and hip motions using information from captured motion data and applying corresponding changes to a virtual avatar. We discuss our approach and initial results.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The use of self avatars in virtual reality (VR) can bring users a stronger sense of presence and produce a more compelling experience by providing additional visual feedback during interactions. Avatars also become increasingly more relevant in VR as they provide a user with an identity for social interactions in multi-user settings. However, with current consumer VR setups that include only a head mounted display and hand controllers, implementation of self avatars are generally limited in the ability to mimic actions performed in the real world. Our work explores the idea of simulating a wide range of upper body motions using motion and positional data from only the head and hand motion data. We present a method to differentiate head and hip motions using information from captured motion data and applying corresponding changes to a virtual avatar. We discuss our approach and initial results.",
"fno": "07892339",
"keywords": [
"Avatars",
"Head",
"Turning",
"Tracking",
"Torso",
"Hip",
"H 5 1 Information Interfaces And Presentation Multimedia Information Systems Artificial Augmented And Virtual Realities"
],
"authors": [
{
"affiliation": "Texas A&M University, USA",
"fullName": "Dustin T. Han",
"givenName": "Dustin T.",
"surname": "Han",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Texas A&M University, USA",
"fullName": "Shyam Prathish Sargunam",
"givenName": "Shyam Prathish",
"surname": "Sargunam",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Texas A&M University, USA",
"fullName": "Eric D. Ragan",
"givenName": "Eric D.",
"surname": "Ragan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-01-01T00:00:00",
"pubType": "proceedings",
"pages": "387-388",
"year": "2017",
"issn": "2375-5334",
"isbn": "978-1-5090-6647-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07892338",
"articleId": "12OmNxWui8h",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07892340",
"articleId": "12OmNy68EJv",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/fg/2011/9140/0/05771384",
"title": "Realistic head motion synthesis for an image-based talking head",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2011/05771384/12OmNviZlz1",
"parentPublication": {
"id": "proceedings/fg/2011/9140/0",
"title": "Face and Gesture 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549379",
"title": "Head motion animation using avatar gaze space",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549379/12OmNxRWI3d",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2009/3943/0/04811014",
"title": "Natural Eye Motion Synthesis by Modeling Gaze-Head Coupling",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04811014/12OmNzC5T34",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2011/9140/0/05771401",
"title": "Realistic head motion synthesis for an image-based talking head",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2011/05771401/12OmNzVoBzX",
"parentPublication": {
"id": "proceedings/fg/2011/9140/0",
"title": "Face and Gesture 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a843",
"title": "Redirecting Desktop Interface Input to Animate Cross-Reality Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a843/1CJcxEyRQ64",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600s8632",
"title": "Neural Head Avatars from Monocular RGB Videos",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600s8632/1H1htwlAaNa",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049669",
"title": "A Systematic Review on the Visualization of Avatars and Agents in AR & VR displayed using Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049669/1KYovYmwfpm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797884",
"title": "Distributed, Collaborative Virtual Reality Application for Product Development with Simple Avatar Calibration Method",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797884/1cJ0TJmlU9q",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a084",
"title": "Real-Time Gait Reconstruction For Virtual Reality Using a Single Sensor",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a084/1pBMjFD8jVm",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdi3c/2021/2569/0/256900a041",
"title": "Development of Head Motion Controlled Wheelchair",
"doi": null,
"abstractUrl": "/proceedings-article/icdi3c/2021/256900a041/1xeWFm36bXa",
"parentPublication": {
"id": "proceedings/icdi3c/2021/2569/0",
"title": "2021 International Conference on Design Innovations for 3Cs Compute Communicate Control (ICDI3C)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
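As a purely illustrative sketch of the kind of head/hip differentiation the abstract above describes (the paper's actual heuristic is not reproduced here), one simple approach is to let the avatar's hip yaw follow the tracked head yaw only once the head leaves a comfort cone:

```python
# Hypothetical per-frame update: the virtual torso (hip yaw) lags the tracked
# head yaw and only starts turning once the head-to-hip angle exceeds a cone
# threshold, mimicking how people re-orient their torso after large head turns.
def update_hip_yaw(hip_yaw: float, head_yaw: float,
                   cone_deg: float = 30.0, follow_rate: float = 0.05) -> float:
    """One per-frame update; angles in degrees."""
    diff = (head_yaw - hip_yaw + 180.0) % 360.0 - 180.0  # wrap to [-180, 180)
    if abs(diff) > cone_deg:                             # head left the cone:
        hip_yaw += follow_rate * diff                    # torso starts turning
    return hip_yaw % 360.0

# Usage: the avatar's torso trails a sustained 90-degree head turn and settles
# once the head is back inside the 30-degree comfort cone.
hip, head = 0.0, 90.0
for _ in range(100):
    hip = update_hip_yaw(hip, head)
print(round(hip, 1))  # roughly 60 degrees: within the cone of the head yaw
```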
{
"proceeding": {
"id": "1BmEezmpGrm",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1BmJxzOtk4w",
"doi": "10.1109/ICCV48922.2021.01426",
"title": "EgoRenderer: Rendering Human Avatars from Egocentric Camera Images",
"normalizedTitle": "EgoRenderer: Rendering Human Avatars from Egocentric Camera Images",
"abstract": "We present EgoRenderer, a system for rendering full-body neural avatars of a person captured by a wearable, egocentric fisheye camera that is mounted on a cap or a VR headset. Our system renders photorealistic novel views of the actor and her motion from arbitrary virtual camera locations. Rendering full-body avatars from such egocentric images come with unique challenges due to the top-down view and large distortions. We tackle these challenges by decomposing the rendering process into several steps, including texture synthesis, pose construction, and neural image translation. For texture synthesis, we propose Ego-DPNet, a neural network that infers dense correspondences between the input fisheye images and an underlying parametric body model, and to extract textures from egocentric inputs. In addition, to encode dynamic appearances, our approach also learns an implicit texture stack that captures detailed appearance variation across poses and viewpoints. For correct pose generation, we first estimate body pose from the egocentric view using a parametric model. We then synthesize an external free-viewpoint pose image by projecting the parametric model to the user-specified target viewpoint. We next combine the target pose image and the textures into a combined feature image, which is transformed into the output color image using a neural image translation network. Experimental evaluations show that EgoRenderer is capable of generating realistic free-viewpoint avatars of a person wearing an egocentric camera. Comparisons to several baselines demonstrate the advantages of our approach.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present EgoRenderer, a system for rendering full-body neural avatars of a person captured by a wearable, egocentric fisheye camera that is mounted on a cap or a VR headset. Our system renders photorealistic novel views of the actor and her motion from arbitrary virtual camera locations. Rendering full-body avatars from such egocentric images come with unique challenges due to the top-down view and large distortions. We tackle these challenges by decomposing the rendering process into several steps, including texture synthesis, pose construction, and neural image translation. For texture synthesis, we propose Ego-DPNet, a neural network that infers dense correspondences between the input fisheye images and an underlying parametric body model, and to extract textures from egocentric inputs. In addition, to encode dynamic appearances, our approach also learns an implicit texture stack that captures detailed appearance variation across poses and viewpoints. For correct pose generation, we first estimate body pose from the egocentric view using a parametric model. We then synthesize an external free-viewpoint pose image by projecting the parametric model to the user-specified target viewpoint. We next combine the target pose image and the textures into a combined feature image, which is transformed into the output color image using a neural image translation network. Experimental evaluations show that EgoRenderer is capable of generating realistic free-viewpoint avatars of a person wearing an egocentric camera. Comparisons to several baselines demonstrate the advantages of our approach.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present EgoRenderer, a system for rendering full-body neural avatars of a person captured by a wearable, egocentric fisheye camera that is mounted on a cap or a VR headset. Our system renders photorealistic novel views of the actor and her motion from arbitrary virtual camera locations. Rendering full-body avatars from such egocentric images come with unique challenges due to the top-down view and large distortions. We tackle these challenges by decomposing the rendering process into several steps, including texture synthesis, pose construction, and neural image translation. For texture synthesis, we propose Ego-DPNet, a neural network that infers dense correspondences between the input fisheye images and an underlying parametric body model, and to extract textures from egocentric inputs. In addition, to encode dynamic appearances, our approach also learns an implicit texture stack that captures detailed appearance variation across poses and viewpoints. For correct pose generation, we first estimate body pose from the egocentric view using a parametric model. We then synthesize an external free-viewpoint pose image by projecting the parametric model to the user-specified target viewpoint. We next combine the target pose image and the textures into a combined feature image, which is transformed into the output color image using a neural image translation network. Experimental evaluations show that EgoRenderer is capable of generating realistic free-viewpoint avatars of a person wearing an egocentric camera. Comparisons to several baselines demonstrate the advantages of our approach.",
"fno": "281200o4508",
"keywords": [
"Headphones",
"Computer Vision",
"Avatars",
"Neural Networks",
"Color",
"Rendering Computer Graphics",
"Cameras",
"Image And Video Synthesis",
"3 D From A Single Image And Shape From X"
],
"authors": [
{
"affiliation": "University of Maryland,Department of Computer Science,College Park",
"fullName": "Tao Hu",
"givenName": "Tao",
"surname": "Hu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Max Plank Institute for Informatics,Saarland Informatics Campus",
"fullName": "Kripasindhu Sarkar",
"givenName": "Kripasindhu",
"surname": "Sarkar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Max Plank Institute for Informatics,Saarland Informatics Campus",
"fullName": "Lingjie Liu",
"givenName": "Lingjie",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Maryland,Department of Computer Science,College Park",
"fullName": "Matthias Zwicker",
"givenName": "Matthias",
"surname": "Zwicker",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Max Plank Institute for Informatics,Saarland Informatics Campus",
"fullName": "Christian Theobalt",
"givenName": "Christian",
"surname": "Theobalt",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "14508-14518",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-2812-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "281200o4499",
"articleId": "1BmEXbEvpjq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "281200o4519",
"articleId": "1BmLfcOOnhS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892240",
"title": "Rapid one-shot acquisition of dynamic VR avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892240/12OmNwGZNLp",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2018/8425/0/842500a098",
"title": "Detailed Human Avatars from Monocular Video",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2018/842500a098/17D45Vw15t7",
"parentPublication": {
"id": "proceedings/3dv/2018/8425/0",
"title": "2018 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200l1480",
"title": "Estimating Egocentric 3D Human Pose in Global Space",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200l1480/1BmJUc8RsvS",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600p5883",
"title": "High-Fidelity Human Avatars from a Single RGB Camera",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600p5883/1H1hK72b9Je",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600n3147",
"title": "Estimating Egocentric 3D Human Pose in the Wild with External Weak Supervision",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600n3147/1H1ms5RlwuQ",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2022/5670/0/567000a197",
"title": "HVTR: Hybrid Volumetric-Textural Rendering for Human Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2022/567000a197/1KYsovTRTC8",
"parentPublication": {
"id": "proceedings/3dv/2022/5670/0",
"title": "2022 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300c382",
"title": "Textured Neural Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300c382/1gyrdPZ8U92",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300h727",
"title": "xR-EgoPose: Egocentric 3D Human Pose From an HMD Camera",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300h727/1hQqpGOfz3i",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2021/0477/0/047700b771",
"title": "Automatic Calibration of the Fisheye Camera for Egocentric 3D Human Pose Estimation from a Single Image",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2021/047700b771/1uqGwG2xnQQ",
"parentPublication": {
"id": "proceedings/wacv/2021/0477/0",
"title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900f147",
"title": "StylePeople: A Generative Model of Fullbody Human Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900f147/1yeILFPUeE8",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
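The EgoRenderer abstract above decomposes rendering into texture synthesis, pose construction, and neural image translation. The sketch below shows only how such stages could be chained; every module body is a stand-in (pooling, random tensors, a single convolution) rather than Ego-DPNet or the paper's translation network:

```python
# Schematic three-stage pipeline: each stage is a placeholder nn.Module so the
# data flow (fisheye frame -> texture + pose image -> translated RGB) is clear.
import torch
import torch.nn as nn

class TextureStage(nn.Module):
    """Stand-in for Ego-DPNet: fisheye frame -> partial UV texture map."""
    def forward(self, fisheye: torch.Tensor) -> torch.Tensor:
        return torch.sigmoid(nn.functional.avg_pool2d(fisheye, 2))

class PoseStage(nn.Module):
    """Stand-in: egocentric pose estimate projected to a target viewpoint."""
    def forward(self, fisheye: torch.Tensor, target_view: torch.Tensor) -> torch.Tensor:
        b = fisheye.shape[0]
        return torch.rand(b, 3, 64, 64)          # dummy rendered pose image

class TranslationStage(nn.Module):
    """Stand-in for the neural image translation network."""
    def __init__(self):
        super().__init__()
        self.net = nn.Conv2d(6, 3, kernel_size=3, padding=1)
    def forward(self, pose_img, texture):
        texture = nn.functional.interpolate(texture, size=pose_img.shape[-2:])
        return self.net(torch.cat([pose_img, texture], dim=1))

# Usage: chain the stages on a dummy fisheye frame and target camera pose.
fisheye = torch.rand(1, 3, 128, 128)
view = torch.eye(4).unsqueeze(0)                 # hypothetical camera pose
texture = TextureStage()(fisheye)
pose_img = PoseStage()(fisheye, view)
out = TranslationStage()(pose_img, texture)
print(out.shape)                                 # torch.Size([1, 3, 64, 64])
```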
{
"proceeding": {
"id": "1H1gVMlkl32",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1H1htwlAaNa",
"doi": "10.1109/CVPR52688.2022.01810",
"title": "Neural Head Avatars from Monocular RGB Videos",
"normalizedTitle": "Neural Head Avatars from Monocular RGB Videos",
"abstract": "We present Neural Head Avatars, a novel neural representation that explicitly models the surface geometry and appearance of an animatable human avatar that can be used for teleconferencing in AR/VR or other applications in the movie or games industry that rely on a digital human.<sup>1</sup><sup>1</sup>philgras.github.io/neural_head_avatars/neural_head_avatars.html Our representation can be learned from a monocular RGB portrait video that features a range of different expressions and views. Specifically, we propose a hybrid representation consisting of a morphable model for the coarse shape and expressions of the face, and two feed-forward networks, predicting vertex offsets of the underlying mesh as well as a view- and expression-dependent texture. We demonstrate that this representation is able to accurately extrapolate to unseen poses and view points, and generates natural expressions while providing sharp texture details. Compared to previous works on head avatars, our method provides a disentangled shape and appearance model of the complete human head (including hair) that is compatible with the standard graphics pipeline. Moreover, it quantitatively and qualitatively outperforms current state of the art in terms of reconstruction quality and novel-view synthesis.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present Neural Head Avatars, a novel neural representation that explicitly models the surface geometry and appearance of an animatable human avatar that can be used for teleconferencing in AR/VR or other applications in the movie or games industry that rely on a digital human.<sup>1</sup><sup>1</sup>philgras.github.io/neural_head_avatars/neural_head_avatars.html Our representation can be learned from a monocular RGB portrait video that features a range of different expressions and views. Specifically, we propose a hybrid representation consisting of a morphable model for the coarse shape and expressions of the face, and two feed-forward networks, predicting vertex offsets of the underlying mesh as well as a view- and expression-dependent texture. We demonstrate that this representation is able to accurately extrapolate to unseen poses and view points, and generates natural expressions while providing sharp texture details. Compared to previous works on head avatars, our method provides a disentangled shape and appearance model of the complete human head (including hair) that is compatible with the standard graphics pipeline. Moreover, it quantitatively and qualitatively outperforms current state of the art in terms of reconstruction quality and novel-view synthesis.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present Neural Head Avatars, a novel neural representation that explicitly models the surface geometry and appearance of an animatable human avatar that can be used for teleconferencing in AR/VR or other applications in the movie or games industry that rely on a digital human.11philgras.github.io/neural_head_avatars/neural_head_avatars.html Our representation can be learned from a monocular RGB portrait video that features a range of different expressions and views. Specifically, we propose a hybrid representation consisting of a morphable model for the coarse shape and expressions of the face, and two feed-forward networks, predicting vertex offsets of the underlying mesh as well as a view- and expression-dependent texture. We demonstrate that this representation is able to accurately extrapolate to unseen poses and view points, and generates natural expressions while providing sharp texture details. Compared to previous works on head avatars, our method provides a disentangled shape and appearance model of the complete human head (including hair) that is compatible with the standard graphics pipeline. Moreover, it quantitatively and qualitatively outperforms current state of the art in terms of reconstruction quality and novel-view synthesis.",
"fno": "694600s8632",
"keywords": [
"Avatars",
"Computer Animation",
"Computer Graphics",
"Face Recognition",
"Image Colour Analysis",
"Image Reconstruction",
"Image Texture",
"Learning Artificial Intelligence",
"Pose Estimation",
"Solid Modelling",
"Teleconferencing",
"Neural Head Avatars",
"Monocular RGB Videos",
"Novel Neural Representation",
"Animatable Human Avatar",
"Monocular RGB Portrait Video",
"Different Expressions",
"Hybrid Representation",
"Coarse Shape",
"Expression Dependent Texture",
"Disentangled Shape",
"Appearance Model",
"Complete Human Head",
"Geometry",
"Teleconferencing",
"Head",
"Three Dimensional Displays",
"Shape",
"Avatars",
"Face Recognition"
],
"authors": [
{
"affiliation": "Heidelberg University",
"fullName": "Philip-William Grassal",
"givenName": "Philip-William",
"surname": "Grassal",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Heidelberg University",
"fullName": "Malte Prinzler",
"givenName": "Malte",
"surname": "Prinzler",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Heidelberg University",
"fullName": "Titus Leistner",
"givenName": "Titus",
"surname": "Leistner",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Heidelberg University",
"fullName": "Carsten Rother",
"givenName": "Carsten",
"surname": "Rother",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technical University of Munich",
"fullName": "Matthias Nießner",
"givenName": "Matthias",
"surname": "Nießner",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Max Planck Institute for Intelligent Systems",
"fullName": "Justus Thies",
"givenName": "Justus",
"surname": "Thies",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-06-01T00:00:00",
"pubType": "proceedings",
"pages": "18632-18643",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6946-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1H1htsluL6M",
"name": "pcvpr202269460-09880001s1-mm_694600s8632.zip",
"size": "15 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09880001s1-mm_694600s8632.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "694600s8622",
"articleId": "1H1jdnZPS0g",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "694600s8644",
"articleId": "1H1hvx4136E",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2016/0836/0/07504726",
"title": "Redirected head gaze to support AR meetings distributed over heterogeneous environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504726/12OmNBOCWvM",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892339",
"title": "Simulating anthropomorphic upper body actions in virtual reality using head and hand motion data",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892339/12OmNCmGO1G",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892245",
"title": "Recognition and mapping of facial expressions to avatar by embedded photo reflective sensors in head mounted display",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892245/12OmNwkR5tU",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549379",
"title": "Head motion animation using avatar gaze space",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549379/12OmNxRWI3d",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2021/3176/0/09667059",
"title": "Emotion Editing in Head Reenactment Videos using Latent Space Manipulation",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2021/09667059/1A6BJzpRJcs",
"parentPublication": {
"id": "proceedings/fg/2021/3176/0",
"title": "2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600n3535",
"title": "I M Avatar: Implicit Morphable Head Avatars from Videos",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600n3535/1H1j2BWBE2c",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049669",
"title": "A Systematic Review on the Visualization of Avatars and Agents in AR & VR displayed using Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049669/1KYovYmwfpm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600a775",
"title": "SIRA: Relightable Avatars from a Single Image",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600a775/1L6LvQR1bs4",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900i645",
"title": "Dynamic Neural Radiance Fields for Monocular 4D Facial Avatar Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900i645/1yeHVNYk40M",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900l1728",
"title": "Pixel-aligned Volumetric Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900l1728/1yeHX163Xnq",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
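The hybrid representation in the abstract above combines a morphable model for coarse, expression-dependent geometry with two feed-forward networks: one predicting per-vertex offsets and one predicting a view- and expression-conditioned texture. The following sketch mirrors that structure with placeholder components (a tiny linear blendshape model instead of FLAME, and small MLPs with assumed sizes):

```python
# Hybrid head-avatar sketch: coarse blendshape geometry + learned vertex
# offsets + a view/expression-conditioned texture MLP. All sizes are assumed.
import torch
import torch.nn as nn

V = 5023  # FLAME-sized vertex count (assumption for illustration)

class HeadAvatar(nn.Module):
    def __init__(self, n_expr: int = 50):
        super().__init__()
        self.template = nn.Parameter(torch.randn(V, 3) * 0.01)
        self.blendshapes = nn.Parameter(torch.randn(n_expr, V, 3) * 0.001)
        self.offset_net = nn.Sequential(nn.Linear(n_expr, 64), nn.ReLU(),
                                        nn.Linear(64, V * 3))
        self.texture_net = nn.Sequential(nn.Linear(2 + 3 + n_expr, 64), nn.ReLU(),
                                         nn.Linear(64, 3), nn.Sigmoid())

    def geometry(self, expr):              # expr: (n_expr,)
        coarse = self.template + torch.einsum('e,evc->vc', expr, self.blendshapes)
        return coarse + self.offset_net(expr).view(V, 3)   # refined vertices

    def color(self, uv, view_dir, expr):   # uv: (N, 2), view_dir: (N, 3)
        cond = expr.expand(uv.shape[0], -1)
        return self.texture_net(torch.cat([uv, view_dir, cond], dim=-1))

# Usage: neutral expression -> mesh vertices and view-dependent surface colors.
avatar = HeadAvatar()
verts = avatar.geometry(torch.zeros(50))
rgb = avatar.color(torch.rand(8, 2), torch.rand(8, 3), torch.zeros(50))
print(verts.shape, rgb.shape)              # (5023, 3) (8, 3)
```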
{
"proceeding": {
"id": "1JrQPhTSspy",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1JrQZ2SKCuQ",
"doi": "10.1109/ISMAR55827.2022.00063",
"title": "Perceptibility of Jitter in Augmented Reality Head-Mounted Displays",
"normalizedTitle": "Perceptibility of Jitter in Augmented Reality Head-Mounted Displays",
"abstract": "When using a see-through augmented reality head-mounted display system (AR HMD), a user’s perception of virtual content may be degraded by a variety of perceptual artifacts resulting from the architecture of rendering and display pipelines. In particular, virtual content that is rendered to appear stationary in the real world (worldlocked) can be susceptible to spatial and temporal 3D position errors. A subset of these errors, termed jitter, result from mismatches between the spatial localization, rendering, and display pipelines, and can manifest as perceived motion of intended-to-be stationary content. Here, we employ psychophysical methods to quantify the perceptibility of jitter artifacts in an AR HMD. For some viewing conditions, participants perceived jitter that was smaller than the pixel pitch of the testbed (i.e., subpixel jitter). In general, we found that jitter perceptibility increased as viewing distance increased and decreased as background luminance increased. We did not find that the contrast ratio of virtual content, age, or experience with AR/VR modulatedjitter perceptibility. Taken together, this study quantifies the degree of jitter that a user can perceive in an AR HMD and demonstrates that it is critical to consider the capabilities and limits of the human visual system when designing the next generation of spatial computing platforms.",
"abstracts": [
{
"abstractType": "Regular",
"content": "When using a see-through augmented reality head-mounted display system (AR HMD), a user’s perception of virtual content may be degraded by a variety of perceptual artifacts resulting from the architecture of rendering and display pipelines. In particular, virtual content that is rendered to appear stationary in the real world (worldlocked) can be susceptible to spatial and temporal 3D position errors. A subset of these errors, termed jitter, result from mismatches between the spatial localization, rendering, and display pipelines, and can manifest as perceived motion of intended-to-be stationary content. Here, we employ psychophysical methods to quantify the perceptibility of jitter artifacts in an AR HMD. For some viewing conditions, participants perceived jitter that was smaller than the pixel pitch of the testbed (i.e., subpixel jitter). In general, we found that jitter perceptibility increased as viewing distance increased and decreased as background luminance increased. We did not find that the contrast ratio of virtual content, age, or experience with AR/VR modulatedjitter perceptibility. Taken together, this study quantifies the degree of jitter that a user can perceive in an AR HMD and demonstrates that it is critical to consider the capabilities and limits of the human visual system when designing the next generation of spatial computing platforms.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "When using a see-through augmented reality head-mounted display system (AR HMD), a user’s perception of virtual content may be degraded by a variety of perceptual artifacts resulting from the architecture of rendering and display pipelines. In particular, virtual content that is rendered to appear stationary in the real world (worldlocked) can be susceptible to spatial and temporal 3D position errors. A subset of these errors, termed jitter, result from mismatches between the spatial localization, rendering, and display pipelines, and can manifest as perceived motion of intended-to-be stationary content. Here, we employ psychophysical methods to quantify the perceptibility of jitter artifacts in an AR HMD. For some viewing conditions, participants perceived jitter that was smaller than the pixel pitch of the testbed (i.e., subpixel jitter). In general, we found that jitter perceptibility increased as viewing distance increased and decreased as background luminance increased. We did not find that the contrast ratio of virtual content, age, or experience with AR/VR modulatedjitter perceptibility. Taken together, this study quantifies the degree of jitter that a user can perceive in an AR HMD and demonstrates that it is critical to consider the capabilities and limits of the human visual system when designing the next generation of spatial computing platforms.",
"fno": "532500a470",
"keywords": [
"Augmented Reality",
"Helmet Mounted Displays",
"Jitter",
"Rendering Computer Graphics",
"Virtual Reality",
"Visual Perception",
"AR HMD",
"Augmented Reality Head Mounted Display System",
"Augmented Reality Head Mounted Displays",
"Display Pipelines",
"Jitter Perceptibility",
"Particular Content",
"Perceived Motion",
"Perceptual Artifacts",
"Rendering",
"Spatial Computing Platforms",
"Spatial D Position Errors",
"Spatial Localization",
"Stationary Content",
"Subpixel Jitter",
"Temporal 3 D Position Errors",
"Viewing Conditions",
"Virtual Content",
"Location Awareness",
"Head Mounted Displays",
"Three Dimensional Displays",
"Pipelines",
"Resists",
"Jitter",
"Visual Systems",
"Human Centered Computing",
"Human Computer Interaction HCI",
"HCI Design And Evaluation Methods",
"Laboratory Experiments Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Paradigms Mixed Augmented Reality"
],
"authors": [
{
"affiliation": "Meta",
"fullName": "James P. Wilmott",
"givenName": "James P.",
"surname": "Wilmott",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Meta",
"fullName": "Ian M. Erkelens",
"givenName": "Ian M.",
"surname": "Erkelens",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Meta",
"fullName": "T. Scott Murdison",
"givenName": "T. Scott",
"surname": "Murdison",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Meta",
"fullName": "Kevin W. Rio",
"givenName": "Kevin W.",
"surname": "Rio",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-10-01T00:00:00",
"pubType": "proceedings",
"pages": "470-478",
"year": "2022",
"issn": "1554-7868",
"isbn": "978-1-6654-5325-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "532500a460",
"articleId": "1JrR6BnYp6U",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "532500a479",
"articleId": "1JrR13pPxBK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "trans/tg/2011/07/ttg2011070888",
"title": "Natural Perspective Projections for Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2011/07/ttg2011070888/13rRUwInvJd",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a584",
"title": "Investigating Display Position of a Head-Fixed Augmented Reality Notification for Dual-task",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a584/1CJd297BiDu",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a646",
"title": "A Pinch-based Text Entry Method for Head-mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a646/1CJeVfhmmkg",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a640",
"title": "Towards Eye-Perspective Rendering for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a640/1CJewzlI3CM",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a477",
"title": "Digital Precompensation for Luminance Nonuniformities in Augmented Reality Head Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a477/1J7WkpqbbYA",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a389",
"title": "Objective Measurements of Background Color Shifts Caused by Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a389/1J7WuL68jAY",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/07/09253561",
"title": "AR-Loupe: Magnified Augmented Reality by Combining an Optical See-Through Head-Mounted Display and a Loupe",
"doi": null,
"abstractUrl": "/journal/tg/2022/07/09253561/1oDXHeBJHNe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a649",
"title": "Comparing World and Screen Coordinate Systems in Optical See-Through Head-Mounted Displays for Text Readability while Walking",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a649/1pysvKFdazS",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a001",
"title": "Perception-Driven Hybrid Foveated Depth of Field Rendering for Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a001/1yeCURkWXpS",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a413",
"title": "Selective Foveated Ray Tracing for Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a413/1yeD8bFOZos",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
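The jitter record above describes psychophysically measured detection thresholds, but the record carries no analysis code. As a minimal, hedged sketch of the kind of analysis such a study implies, the Python below fits a cumulative-Gaussian psychometric function to invented jitter-detection data and reads off the 50% point as a perceptibility threshold; the data values, units, and function names are all assumptions for illustration, not the authors' method.

```python
# Hypothetical sketch of a psychometric-threshold fit, in the spirit of the
# jitter-perceptibility study above. Not the authors' code; data are invented.
import numpy as np
from scipy.optimize import curve_fit
from scipy.stats import norm

def psychometric(x, mu, sigma, lapse=0.02):
    """Cumulative-Gaussian detection curve with a fixed small lapse rate."""
    return lapse + (1.0 - 2.0 * lapse) * norm.cdf(x, loc=mu, scale=sigma)

# Invented data: jitter amplitude (arcmin) vs. proportion of trials in which
# participants reported seeing the world-locked content move.
amplitude = np.array([0.25, 0.5, 1.0, 2.0, 4.0, 8.0])
p_detect = np.array([0.05, 0.12, 0.38, 0.71, 0.93, 0.99])

# With a two-element p0, curve_fit fits only mu and sigma; lapse keeps its default.
(mu, sigma), _ = curve_fit(psychometric, amplitude, p_detect, p0=[2.0, 1.0])
print(f"50% perceptibility threshold ~ {mu:.2f} arcmin (slope sigma = {sigma:.2f})")
```

In these terms, the study's headline finding of subpixel perceptibility under some conditions corresponds to a fitted threshold mu smaller than the testbed's per-pixel angular size.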
{
"proceeding": {
"id": "1JrQPhTSspy",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1JrRdnGe43C",
"doi": "10.1109/ISMAR55827.2022.00027",
"title": "Evaluation of Text Selection Techniques in Virtual Reality Head-Mounted Displays",
"normalizedTitle": "Evaluation of Text Selection Techniques in Virtual Reality Head-Mounted Displays",
"abstract": "Text selection is an essential activity in interactive systems, including virtual reality (VR) head-mounted displays (HMDs). It is useful for: sharing information across apps or platforms, highlighting and making notes while reading articles, and text editing tasks. Despite its usefulness, the space of text selection interaction is underexplored in VR HMDs. In this research, we performed a user study with 24 participants to investigate the performance and user preference of six text selection techniques (Controller+Dwell, Controller+Click, Head+Dwell, Head+Click, Hand+Dwell, Hand+Pinch). Results reveal that Head+Click is ranked first since it has excellent speedaccuracy performance (2nd fastest task completion speed with 3rd lowest total error rate), provides the best user experience, and produces a very low workload—followed by Controller+Click, which has the fastest speed and comparable experience with Head+Click, but much higher total error rate. Other methods can also be useful depending on the goals of the system or the users. As a first systematic evaluation of pointing Z_$\\times$_Z selection techniques for text selection in VR, the results of this work provide a strong foundation for further research in this area of growing importance to the future of VR to help it become a more ubiquitous and pervasive platform.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Text selection is an essential activity in interactive systems, including virtual reality (VR) head-mounted displays (HMDs). It is useful for: sharing information across apps or platforms, highlighting and making notes while reading articles, and text editing tasks. Despite its usefulness, the space of text selection interaction is underexplored in VR HMDs. In this research, we performed a user study with 24 participants to investigate the performance and user preference of six text selection techniques (Controller+Dwell, Controller+Click, Head+Dwell, Head+Click, Hand+Dwell, Hand+Pinch). Results reveal that Head+Click is ranked first since it has excellent speedaccuracy performance (2nd fastest task completion speed with 3rd lowest total error rate), provides the best user experience, and produces a very low workload—followed by Controller+Click, which has the fastest speed and comparable experience with Head+Click, but much higher total error rate. Other methods can also be useful depending on the goals of the system or the users. As a first systematic evaluation of pointing $\\times$ selection techniques for text selection in VR, the results of this work provide a strong foundation for further research in this area of growing importance to the future of VR to help it become a more ubiquitous and pervasive platform.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Text selection is an essential activity in interactive systems, including virtual reality (VR) head-mounted displays (HMDs). It is useful for: sharing information across apps or platforms, highlighting and making notes while reading articles, and text editing tasks. Despite its usefulness, the space of text selection interaction is underexplored in VR HMDs. In this research, we performed a user study with 24 participants to investigate the performance and user preference of six text selection techniques (Controller+Dwell, Controller+Click, Head+Dwell, Head+Click, Hand+Dwell, Hand+Pinch). Results reveal that Head+Click is ranked first since it has excellent speedaccuracy performance (2nd fastest task completion speed with 3rd lowest total error rate), provides the best user experience, and produces a very low workload—followed by Controller+Click, which has the fastest speed and comparable experience with Head+Click, but much higher total error rate. Other methods can also be useful depending on the goals of the system or the users. As a first systematic evaluation of pointing - selection techniques for text selection in VR, the results of this work provide a strong foundation for further research in this area of growing importance to the future of VR to help it become a more ubiquitous and pervasive platform.",
"fno": "532500a131",
"keywords": [
"Augmented Reality",
"Helmet Mounted Displays",
"Interactive Systems",
"Text Analysis",
"Text Editing",
"User Interfaces",
"Virtual Reality",
"2nd Fastest Task Completion Speed",
"3rd Lowest Total Error Rate",
"Controller Click",
"Controller Dwell",
"Excellent Speedaccuracy Performance",
"Hand Dwell",
"Head Click",
"Head Dwell",
"Higher Total Error Rate",
"Highlighting Making Notes",
"Interactive Systems",
"Text Editing Tasks",
"Text Selection Interaction",
"Text Selection Techniques",
"Useful Depending",
"User Experience",
"User Preference",
"Virtual Reality Head Mounted Displays",
"VR HM Ds",
"Head Mounted Displays",
"Systematics",
"Error Analysis",
"Interactive Systems",
"Aerospace Electronics",
"User Experience",
"Neck",
"Text Selection",
"Virtual Reality",
"Pointing Methods",
"Selection Mechanisms",
"User Study",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Paradigms",
"Virtual Reality Human Centered Computing",
"Interaction Techniques Human Centered Computing",
"Interaction Design",
"Empirical Studies In Interaction Design"
],
"authors": [
{
"affiliation": "Birmingham City University,DMT Lab,Birmingham,UK",
"fullName": "Wenge Xu",
"givenName": "Wenge",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xi’an Jiaotong-Liverpool University,Suzhou,China",
"fullName": "Xuanru Meng",
"givenName": "Xuanru",
"surname": "Meng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xi’an Jiaotong-Liverpool University,Suzhou,China",
"fullName": "Kangyou Yu",
"givenName": "Kangyou",
"surname": "Yu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Birmingham City University,DMT Lab,Birmingham,UK",
"fullName": "Sayan Sarcar",
"givenName": "Sayan",
"surname": "Sarcar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xi’an Jiaotong-Liverpool University,Suzhou,China",
"fullName": "Hai-Ning Liang",
"givenName": "Hai-Ning",
"surname": "Liang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-10-01T00:00:00",
"pubType": "proceedings",
"pages": "131-140",
"year": "2022",
"issn": "1554-7868",
"isbn": "978-1-6654-5325-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "532500a121",
"articleId": "1JrQUpt2CME",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "532500a141",
"articleId": "1JrRepqALbW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2008/2047/0/04476604",
"title": "Poster: Sliding Viewport for Head Mounted Displays in Interactive Environments",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2008/04476604/12OmNzdoMAW",
"parentPublication": {
"id": "proceedings/3dui/2008/2047/0",
"title": "2008 IEEE Symposium on 3D User Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08642443",
"title": "RingText: Dwell-free and hands-free Text Entry for Mobile Head-Mounted Displays using Head Motions",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08642443/17PYEjrlgBP",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a646",
"title": "A Pinch-based Text Entry Method for Head-mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a646/1CJeVfhmmkg",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a470",
"title": "Perceptibility of Jitter in Augmented Reality Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a470/1JrQZ2SKCuQ",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a074",
"title": "An Exploration of Hands-free Text Selection for Virtual Reality Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a074/1JrRaeV82L6",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/07/08723303",
"title": "Errata to “RingText: Dwell-Free and Hands-Free Text Entry for Mobile Head-Mounted Displays Using Head Motions” [May 19 1991-2001]",
"doi": null,
"abstractUrl": "/journal/tg/2019/07/08723303/1aqzjJfQFCU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797901",
"title": "DepthText: Leveraging Head Movements towards the Depth Dimension for Hands-free Text Entry in Mobile Virtual Reality Systems",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797901/1cJ13BSrOkU",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a649",
"title": "Comparing World and Screen Coordinate Systems in Optical See-Through Head-Mounted Displays for Text Readability while Walking",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a649/1pysvKFdazS",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a344",
"title": "Exploration of Hands-free Text Entry Techniques For Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a344/1pysyrYBX5C",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a118",
"title": "Exploring Head-based Mode-Switching in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a118/1yeD1RhEseY",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
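The six techniques in the record above are points in a pointing × selection design space. As a small illustrative sketch (not the authors' apparatus code), the Python below enumerates that space and marks which combinations the abstract names as tested; the scaffolding around the `tested` set is an assumption for illustration.

```python
# Illustrative enumeration of the pointing x selection design space from the
# abstract above. Only the `tested` set comes from the source.
from itertools import product

pointing = ["Controller", "Head", "Hand"]
selection = ["Dwell", "Click", "Pinch"]

# The six conditions named in the abstract.
tested = {
    ("Controller", "Dwell"), ("Controller", "Click"),
    ("Head", "Dwell"), ("Head", "Click"),
    ("Hand", "Dwell"), ("Hand", "Pinch"),
}

for p, s in product(pointing, selection):
    status = "tested" if (p, s) in tested else "not tested"
    print(f"{p}+{s}: {status}")
```

Crossing the two factors makes explicit that three of the nine combinations (Controller+Pinch, Head+Pinch, Hand+Click) fell outside the study's scope.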
{
"proceeding": {
"id": "1gyshXRzHpK",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1gysn4uy67C",
"doi": "10.1109/ISMAR-Adjunct.2019.00022",
"title": "The Kuroko Paradigm: The Implications of Augmenting Physical Interaction with AR Avatars",
"normalizedTitle": "The Kuroko Paradigm: The Implications of Augmenting Physical Interaction with AR Avatars",
"abstract": "We propose a concept in this poster paper, the Kuroko Paradigm, which is able to enhance user engagement during interaction with an augmented reality (AR) avatar by adding a physical object to the interaction with the avatar. With the development of AR and VR, interactions between users and AR avatars have been realized with different approaches. However, most of such interactions and experiences are passive, from which users do not expect a high level of engagement. We hypothesize that by introducing a reality actuator, such as a robot or a drone, to handle a physical object triggered by the user without being noticed, and rendering AR avatars as interacting with the physical object at the same time, user engagement during the experience will be enhanced. To prove this concept, we conducted an experiment emulating a classic game of catch. In the experiment, a user will try to throw a ball to an AR avatar, and the ball will be caught by a reality actuator. From the user's perspective, the ball is caught by the AR avatar. In the future, we plan to extend the experiment by adding control groups with differing conditions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a concept in this poster paper, the Kuroko Paradigm, which is able to enhance user engagement during interaction with an augmented reality (AR) avatar by adding a physical object to the interaction with the avatar. With the development of AR and VR, interactions between users and AR avatars have been realized with different approaches. However, most of such interactions and experiences are passive, from which users do not expect a high level of engagement. We hypothesize that by introducing a reality actuator, such as a robot or a drone, to handle a physical object triggered by the user without being noticed, and rendering AR avatars as interacting with the physical object at the same time, user engagement during the experience will be enhanced. To prove this concept, we conducted an experiment emulating a classic game of catch. In the experiment, a user will try to throw a ball to an AR avatar, and the ball will be caught by a reality actuator. From the user's perspective, the ball is caught by the AR avatar. In the future, we plan to extend the experiment by adding control groups with differing conditions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a concept in this poster paper, the Kuroko Paradigm, which is able to enhance user engagement during interaction with an augmented reality (AR) avatar by adding a physical object to the interaction with the avatar. With the development of AR and VR, interactions between users and AR avatars have been realized with different approaches. However, most of such interactions and experiences are passive, from which users do not expect a high level of engagement. We hypothesize that by introducing a reality actuator, such as a robot or a drone, to handle a physical object triggered by the user without being noticed, and rendering AR avatars as interacting with the physical object at the same time, user engagement during the experience will be enhanced. To prove this concept, we conducted an experiment emulating a classic game of catch. In the experiment, a user will try to throw a ball to an AR avatar, and the ball will be caught by a reality actuator. From the user's perspective, the ball is caught by the AR avatar. In the future, we plan to extend the experiment by adding control groups with differing conditions.",
"fno": "476500a026",
"keywords": [
"Augmented Reality",
"Avatars",
"Human Computer Interaction",
"Rendering Computer Graphics",
"Physical Interaction",
"AR Avatar",
"User Engagement",
"Augmented Reality Avatar",
"Physical Object",
"Reality Actuator",
"Kuroko Paradigm",
"AR Avatars",
"Rendering",
"Avatars",
"Actuators",
"Augmented Reality",
"Resists",
"Manipulators",
"Computing Methodologies",
"Computer Graphics",
"Mixed Augmented Reality",
"Graphics Systemsand Interface"
],
"authors": [
{
"affiliation": "Tokyo Institute of Technology",
"fullName": "Tianyang Gao",
"givenName": "Tianyang",
"surname": "Gao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tokyo Institute of Technology",
"fullName": "Yuta Itoh",
"givenName": "Yuta",
"surname": "Itoh",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "26-27",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-4765-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "476500a022",
"articleId": "1gysn0YPLm8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "476500a028",
"articleId": "1gysmKtgeju",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vrw/2022/8402/0/840200a730",
"title": "Third-Person Perspective Avatar Embodiment in Augmented Reality: Examining the Proteus Effect on Physical Performance",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a730/1CJffY1QgeI",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a686",
"title": "Exploring Augmented Reality Notification Placement while Communicating with Virtual Avatar",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a686/1J7WgWfFoOs",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a450",
"title": "What Can I Do There? Controlling AR Self-Avatars to Better Perceive Affordances of the Real World",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a450/1JrQVmURYMo",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a666",
"title": "Investigating User Embodiment of Inverse-Kinematic Avatars in Smartphone Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a666/1JrR5i5jDhe",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049669",
"title": "A Systematic Review on the Visualization of Avatars and Agents in AR & VR displayed using Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049669/1KYovYmwfpm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798044",
"title": "Effect of Full Body Avatar in Augmented Reality Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798044/1cJ14GMFJdK",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a167",
"title": "DroneCamo: Modifying Human-Drone Comfort via Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a167/1gysj7RryWk",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a019",
"title": "AR Mini-Games for Supermarkets",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a019/1pBMf0WeeVa",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a462",
"title": "Body Weight Perception of Females using Photorealistic Avatars in Virtual and Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a462/1pysu9tPcGc",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eitt/2020/9171/0/917100a229",
"title": "Research on Mobile AR Language Learning Environment Based on Virtual Avatar",
"doi": null,
"abstractUrl": "/proceedings-article/eitt/2020/917100a229/1qyxq6q6MPS",
"parentPublication": {
"id": "proceedings/eitt/2020/9171/0",
"title": "2020 Ninth International Conference of Educational Innovation through Technology (EITT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNBzRNrw",
"title": "2013 46th Hawaii International Conference on System Sciences",
"acronym": "hicss",
"groupId": "1000730",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNARiLZR",
"doi": "10.1109/HICSS.2013.28",
"title": "3D Virtual Environments and Corporate Learning: An Empirical Investigation of Benefits",
"normalizedTitle": "3D Virtual Environments and Corporate Learning: An Empirical Investigation of Benefits",
"abstract": "Organizations invest significant resources in learning and development (L&D) to both enhance and protect their human capital. As such, they continue to search for innovative design and delivery approaches that are both cost efficient and learning effective. In this article, we consider one organization's use of a 3D virtual environment (VE) to bring a managerial and leadership development program, informed by collaborative learning principles, to globally distributed participants. To date, there is little empirical evidence that attests to the specific learning benefits of a VE, that is, benefits that derive from distinguishing features such as presence (i.e., the sense of 'being there' in the VE). Given this, and drawing from prior research, we develop and empirically test a model that examines the relationships among organizational participants' perceptions of presence, teamwork quality and outcomes. Our results provide important insights into the mechanisms underlying L&D processes and outcomes in VEs.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Organizations invest significant resources in learning and development (L&D) to both enhance and protect their human capital. As such, they continue to search for innovative design and delivery approaches that are both cost efficient and learning effective. In this article, we consider one organization's use of a 3D virtual environment (VE) to bring a managerial and leadership development program, informed by collaborative learning principles, to globally distributed participants. To date, there is little empirical evidence that attests to the specific learning benefits of a VE, that is, benefits that derive from distinguishing features such as presence (i.e., the sense of 'being there' in the VE). Given this, and drawing from prior research, we develop and empirically test a model that examines the relationships among organizational participants' perceptions of presence, teamwork quality and outcomes. Our results provide important insights into the mechanisms underlying L&D processes and outcomes in VEs.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Organizations invest significant resources in learning and development (L&D) to both enhance and protect their human capital. As such, they continue to search for innovative design and delivery approaches that are both cost efficient and learning effective. In this article, we consider one organization's use of a 3D virtual environment (VE) to bring a managerial and leadership development program, informed by collaborative learning principles, to globally distributed participants. To date, there is little empirical evidence that attests to the specific learning benefits of a VE, that is, benefits that derive from distinguishing features such as presence (i.e., the sense of 'being there' in the VE). Given this, and drawing from prior research, we develop and empirically test a model that examines the relationships among organizational participants' perceptions of presence, teamwork quality and outcomes. Our results provide important insights into the mechanisms underlying L&D processes and outcomes in VEs.",
"fno": "4892a893",
"keywords": [
"Teamwork",
"Lead",
"Avatars",
"Collaborative Work",
"Context",
"Organizations",
"Field Study",
"3 D Virtual Environments",
"Presence",
"Performance",
"Learning And Development",
"Virtual Teams"
],
"authors": [
{
"affiliation": null,
"fullName": "Anne P. Massey",
"givenName": "Anne P.",
"surname": "Massey",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Mitzi M. Montoya",
"givenName": "Mitzi M.",
"surname": "Montoya",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Philip Fei Wu",
"givenName": "Philip Fei",
"surname": "Wu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "hicss",
"isOpenAccess": true,
"showRecommendedArticles": true,
"showBuyMe": false,
"hasPdf": true,
"pubDate": "2013-01-01T00:00:00",
"pubType": "proceedings",
"pages": "893-902",
"year": "2013",
"issn": "1530-1605",
"isbn": "978-1-4673-5933-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4892a883",
"articleId": "12OmNwekjyJ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4892a903",
"articleId": "12OmNzYwbXd",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2003/1882/0/18820141",
"title": "Effect of Latency on Presence in Stressful Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2003/18820141/12OmNAFnCwJ",
"parentPublication": {
"id": "proceedings/vr/2003/1882/0",
"title": "Proceedings IEEE Virtual Reality 2003",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2003/1882/0/18820125",
"title": "Effects of Handling Real Objects and Avatar Fidelity On Cognitive Task Performance in Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2003/18820125/12OmNC4eSHb",
"parentPublication": {
"id": "proceedings/vr/2003/1882/0",
"title": "Proceedings IEEE Virtual Reality 2003",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ic4e/2010/5680/0/05432499",
"title": "Investigation of the Relation Between Interaction and Sense of Presence in Educational Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/ic4e/2010/05432499/12OmNC4wtyh",
"parentPublication": {
"id": "proceedings/ic4e/2010/5680/0",
"title": "2010 International Conference on e-Education, e-Business, e-Management, and e-Learning, (IC4E)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2008/3167/0/3167a487",
"title": "Immersion Adoption: Design Principles for Self-Guided Learning in Virtual Learning Environments",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2008/3167a487/12OmNxvO02p",
"parentPublication": {
"id": "proceedings/icalt/2008/3167/0",
"title": "IEEE International Conference on Advanced Learning Technologies (ICALT 2008)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2010/6237/0/05444791",
"title": "Influence of tactile feedback and presence on egocentric distance perception in virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2010/05444791/12OmNyoAA64",
"parentPublication": {
"id": "proceedings/vr/2010/6237/0",
"title": "2010 IEEE Virtual Reality Conference (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446448",
"title": "Agency Enhances Body Ownership Illusion of Being a Virtual Bat",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446448/13bd1gzWkRR",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icse-seet/2022/9592/0/959200a235",
"title": "Respond to Change or Die: An Educational Scrum Simulation for Distributed Teams",
"doi": null,
"abstractUrl": "/proceedings-article/icse-seet/2022/959200a235/1EaOP8RvQJO",
"parentPublication": {
"id": "proceedings/icse-seet/2022/9592/0",
"title": "2022 IEEE/ACM 44th International Conference on Software Engineering: Software Engineering Education and Training (ICSE-SEET)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049736",
"title": "Effects of the Visual Fidelity of Virtual Environments on Presence, Context-dependent Forgetting, and Source-monitoring Error",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049736/1KYowRibw1q",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798345",
"title": "Investigation of Visual Self-Representation for a Walking-in-Place Navigation System in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798345/1cJ1hpkUgHS",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a292",
"title": "Future Visions for Higher Education: An Investigation of the Benefits of Virtual Reality for Teaching University Students",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a292/1yeQCjNwazK",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzRZpZU",
"title": "2012 IEEE VR Workshop on Perceptual Illusions in Virtual Environments",
"acronym": "pive",
"groupId": "1801625",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBSSV9H",
"doi": "10.1109/PIVE.2012.6229792",
"title": "Differences in presence between healthy users and users with multiple sclerosis",
"normalizedTitle": "Differences in presence between healthy users and users with multiple sclerosis",
"abstract": "We are investigating how persons with mobility impairments due to multiple sclerosis (MS) experience the sense of presence in a Virtual Environment (VE). Since the mid 90's, virtual reality (VR) research has produced a rich knowledge of how design and interaction features of VEs affect presence. However, to our knowledge almost all of the previous research studies have been conducted only with healthy persons. Thus, it is not known how these factors affect the presence of mobility impaired persons, which could have implications for VR-based rehabilitation. To begin investigating this, we replicated a classic VR experiment that has been replicated many times before, but we ran the study with a different population: 10 persons with mobility impairments caused by MS and another 5 healthy persons of similar demographics (e.g., age) as our MS population. This paper compares how these two groups experience presence and discusses the differences we found in our study.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We are investigating how persons with mobility impairments due to multiple sclerosis (MS) experience the sense of presence in a Virtual Environment (VE). Since the mid 90's, virtual reality (VR) research has produced a rich knowledge of how design and interaction features of VEs affect presence. However, to our knowledge almost all of the previous research studies have been conducted only with healthy persons. Thus, it is not known how these factors affect the presence of mobility impaired persons, which could have implications for VR-based rehabilitation. To begin investigating this, we replicated a classic VR experiment that has been replicated many times before, but we ran the study with a different population: 10 persons with mobility impairments caused by MS and another 5 healthy persons of similar demographics (e.g., age) as our MS population. This paper compares how these two groups experience presence and discusses the differences we found in our study.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We are investigating how persons with mobility impairments due to multiple sclerosis (MS) experience the sense of presence in a Virtual Environment (VE). Since the mid 90's, virtual reality (VR) research has produced a rich knowledge of how design and interaction features of VEs affect presence. However, to our knowledge almost all of the previous research studies have been conducted only with healthy persons. Thus, it is not known how these factors affect the presence of mobility impaired persons, which could have implications for VR-based rehabilitation. To begin investigating this, we replicated a classic VR experiment that has been replicated many times before, but we ran the study with a different population: 10 persons with mobility impairments caused by MS and another 5 healthy persons of similar demographics (e.g., age) as our MS population. This paper compares how these two groups experience presence and discusses the differences we found in our study.",
"fno": "06229792",
"keywords": [
"Diseases",
"Handicapped Aids",
"Virtual Reality",
"Multiple Sclerosis",
"Virtual Environment",
"Virtual Reality Research",
"Mobility Impaired Persons",
"VR Based Rehabilitation",
"Legged Locomotion",
"Fatigue",
"Training",
"Games",
"Virtual Environments",
"Navigation",
"Standards",
"Virtual Reality",
"Presence",
"Multiple Sclerosis",
"Rehabilitation",
"User Studies"
],
"authors": [
{
"affiliation": "The University of Texas at San Antonio, USA",
"fullName": "Rongkai Guo",
"givenName": "Rongkai",
"surname": "Guo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Texas at San Antonio, USA",
"fullName": "John Quarles",
"givenName": "John",
"surname": "Quarles",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "pive",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2012",
"issn": null,
"isbn": "978-1-4673-1218-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "06229793",
"articleId": "12OmNrJRPoT",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2016/0842/0/07460041",
"title": "Visual feedback to improve the accessibility of head-mounted displays for persons with balance impairments",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460041/12OmNwF0BS2",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2014/2871/0/06802059",
"title": "A unique way to increase presence of mobility impaired users — Increasing confidence in balance",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802059/12OmNxGAL3n",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504764",
"title": "Visual feedback to improve the accessibility of head-mounted displays for persons with balance impairments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504764/12OmNy6qfPt",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2015/6886/0/07131742",
"title": "Methods to reduce cybersickness and enhance presence for in-place navigation techniques",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2015/07131742/12OmNyxFKaM",
"parentPublication": {
"id": "proceedings/3dui/2015/6886/0",
"title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223376",
"title": "I'm There! The influence of virtual reality and mixed reality environments combined with two different navigation methods on presence",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223376/12OmNzYwcc3",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446194",
"title": "Cybersickness-Provoking Virtual Reality Alters Brain Signals of Persons with Multiple Sclerosis",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446194/13bd1gzWkQm",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049680",
"title": "Assisted walking-in-place: Introducing assisted motion to walking-by-cycling in embodied virtual reality",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049680/1KYolEFtr6U",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798348",
"title": "Individual Differences in Embodied Distance Estimation in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798348/1cJ0H4fRjBS",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090521",
"title": "A Constrained Path Redirection for Passive Haptics",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090521/1jIxpAQuq8o",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090560",
"title": "Either Give Me a Reason to Stand or an Opportunity to Sit in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090560/1jIxzjmEoeY",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
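The record above compares presence between a small MS group (n = 10) and healthy controls (n = 5), but the abstract does not name a statistical test. As a hedged sketch, a nonparametric two-sample test is a common choice for small ordinal questionnaire samples; the test choice and all scores below are assumptions, not the paper's data or analysis.

```python
# Toy group comparison in the spirit of the presence study above. The choice
# of a Mann-Whitney U test is an assumption (the abstract names no test),
# and the questionnaire scores are invented.
from scipy.stats import mannwhitneyu

# Hypothetical presence-questionnaire scores (higher = stronger presence).
ms_group = [3.1, 4.0, 2.8, 3.5, 4.2, 3.0, 3.8, 2.9, 3.6, 4.1]  # n = 10
controls = [4.5, 4.8, 4.2, 5.0, 4.6]                            # n = 5

stat, p = mannwhitneyu(ms_group, controls, alternative="two-sided")
print(f"Mann-Whitney U = {stat}, p = {p:.3f}")
```

A rank-based test like this avoids assuming normality, which is hard to justify with samples of 10 and 5.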
{
"proceeding": {
"id": "12OmNxV4iw4",
"title": "Proceedings IEEE Virtual Reality 2003",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2003",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNC4eSHb",
"doi": "10.1109/VR.2003.1191130",
"title": "Effects of Handling Real Objects and Avatar Fidelity On Cognitive Task Performance in Virtual Environments",
"normalizedTitle": "Effects of Handling Real Objects and Avatar Fidelity On Cognitive Task Performance in Virtual Environments",
"abstract": "Immersive virtual environments (VEs) provide participants with computer-generated environments filled with virtual objects to assist in learning, training, and practicing dangerous and/or expensive tasks. But for certain tasks, does having every object being virtual inhibit the interactivity? Further, does the virtual object?s visual fidelity affect performance? Overall VE effectiveness may be reduced if users spend most of their time and cognitive capacity learning how to interact and adapting to interacting with a purely virtual environment. We investigated how handling real objects and how self-avatar visual fidelity affects performance on a spatial cognitive task in an immersive VE. We compared participants? performance on a block arrangement task in both a real-space environment and several virtual and hybrid environments. The results showed that manipulating real objects in a VE brings task performance closer to that of real space, compared to manipulating virtual objects.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Immersive virtual environments (VEs) provide participants with computer-generated environments filled with virtual objects to assist in learning, training, and practicing dangerous and/or expensive tasks. But for certain tasks, does having every object being virtual inhibit the interactivity? Further, does the virtual object?s visual fidelity affect performance? Overall VE effectiveness may be reduced if users spend most of their time and cognitive capacity learning how to interact and adapting to interacting with a purely virtual environment. We investigated how handling real objects and how self-avatar visual fidelity affects performance on a spatial cognitive task in an immersive VE. We compared participants? performance on a block arrangement task in both a real-space environment and several virtual and hybrid environments. The results showed that manipulating real objects in a VE brings task performance closer to that of real space, compared to manipulating virtual objects.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Immersive virtual environments (VEs) provide participants with computer-generated environments filled with virtual objects to assist in learning, training, and practicing dangerous and/or expensive tasks. But for certain tasks, does having every object being virtual inhibit the interactivity? Further, does the virtual object?s visual fidelity affect performance? Overall VE effectiveness may be reduced if users spend most of their time and cognitive capacity learning how to interact and adapting to interacting with a purely virtual environment. We investigated how handling real objects and how self-avatar visual fidelity affects performance on a spatial cognitive task in an immersive VE. We compared participants? performance on a block arrangement task in both a real-space environment and several virtual and hybrid environments. The results showed that manipulating real objects in a VE brings task performance closer to that of real space, compared to manipulating virtual objects.",
"fno": "18820125",
"keywords": [],
"authors": [
{
"affiliation": "University of North Carolina at Charlotte",
"fullName": "Benjamin Lok",
"givenName": "Benjamin",
"surname": "Lok",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Disney Corporation",
"fullName": "Samir Naik",
"givenName": "Samir",
"surname": "Naik",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of North Carolina at Chapel Hill",
"fullName": "Mary Whitton",
"givenName": "Mary",
"surname": "Whitton",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of North Carolina at Chapel Hill",
"fullName": "Frederick P. Brooks Jr.",
"givenName": "Frederick P.",
"surname": "Brooks Jr.",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2003-03-01T00:00:00",
"pubType": "proceedings",
"pages": "125",
"year": "2003",
"issn": "1087-8270",
"isbn": "0-7695-1882-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "18820121",
"articleId": "12OmNxw5BxW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "18820133",
"articleId": "12OmNC8uRAn",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vrais/1997/7843/0/78430038",
"title": "Evaluation of the effects of frame time variation on VR task performance",
"doi": null,
"abstractUrl": "/proceedings-article/vrais/1997/78430038/12OmNBBQZnQ",
"parentPublication": {
"id": "proceedings/vrais/1997/7843/0",
"title": "Virtual Reality Annual International Symposium",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2013/1604/0/06618352",
"title": "A twofold approach to object and avatar data management in P2P-based virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2013/06618352/12OmNC2fGxp",
"parentPublication": {
"id": "proceedings/icmew/2013/1604/0",
"title": "2013 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2008/1971/0/04480759",
"title": "High-Fidelity Avatar Eye-Representation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2008/04480759/12OmNrJAdQR",
"parentPublication": {
"id": "proceedings/vr/2008/1971/0",
"title": "IEEE Virtual Reality 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2007/0905/0/04161049",
"title": "The Influence of Visual Appearance of User's Avatar on the Manipulation of Objects in Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2007/04161049/12OmNwe2Ipd",
"parentPublication": {
"id": "proceedings/vr/2007/0905/0",
"title": "2007 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892241",
"title": "Prism aftereffects for throwing with a self-avatar in an immersive virtual environment",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892241/12OmNxy4N0w",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrais/1996/7295/0/72950163",
"title": "A dataflow representation for defining behaviours within virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vrais/1996/72950163/12OmNyQYtb6",
"parentPublication": {
"id": "proceedings/vrais/1996/7295/0",
"title": "Virtual Reality Annual International Symposium",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049736",
"title": "Effects of the Visual Fidelity of Virtual Environments on Presence, Context-dependent Forgetting, and Source-monitoring Error",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049736/1KYowRibw1q",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998353",
"title": "Augmented Virtual Teleportation for High-Fidelity Telecollaboration",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998353/1hpPDKs9c7C",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a064",
"title": "The Effects of Object Shape, Fidelity, Color, and Luminance on Depth Perception in Handheld Mobile Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a064/1pysxPMqyTm",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a703",
"title": "COVIZ: Visualization of Effects of COVID-19 on New York City Through Socially Impactful Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a703/1tnXHD0pqFi",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwMXnv0",
"title": "2014 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxGAL3n",
"doi": "10.1109/VR.2014.6802059",
"title": "A unique way to increase presence of mobility impaired users — Increasing confidence in balance",
"normalizedTitle": "A unique way to increase presence of mobility impaired users — Increasing confidence in balance",
"abstract": "Previous research on healthy subjects showed that a higher sense of presence can be elicited through full body avatars versus no avatar. However, minimal avatar research has been conducted with persons with mobility impairments. For these users, Virtual Environments (VEs) and avatars are becoming more common as tools for rehabilitation. If we can maximize presence in these VEs, users may be more effectively distracted from the pain and repetitiveness of rehabilitation, thereby increasing users' motivation. To investigate this we replicated the classic virtual pit experiment and included a responsive full body avatar (or lack thereof) as a 3D user interface. We recruited from two different populations: mobility impaired persons and healthy persons as a control. Results give insight into many other differences between healthy and mobility impaired users' experience of presence in VEs.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Previous research on healthy subjects showed that a higher sense of presence can be elicited through full body avatars versus no avatar. However, minimal avatar research has been conducted with persons with mobility impairments. For these users, Virtual Environments (VEs) and avatars are becoming more common as tools for rehabilitation. If we can maximize presence in these VEs, users may be more effectively distracted from the pain and repetitiveness of rehabilitation, thereby increasing users' motivation. To investigate this we replicated the classic virtual pit experiment and included a responsive full body avatar (or lack thereof) as a 3D user interface. We recruited from two different populations: mobility impaired persons and healthy persons as a control. Results give insight into many other differences between healthy and mobility impaired users' experience of presence in VEs.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Previous research on healthy subjects showed that a higher sense of presence can be elicited through full body avatars versus no avatar. However, minimal avatar research has been conducted with persons with mobility impairments. For these users, Virtual Environments (VEs) and avatars are becoming more common as tools for rehabilitation. If we can maximize presence in these VEs, users may be more effectively distracted from the pain and repetitiveness of rehabilitation, thereby increasing users' motivation. To investigate this we replicated the classic virtual pit experiment and included a responsive full body avatar (or lack thereof) as a 3D user interface. We recruited from two different populations: mobility impaired persons and healthy persons as a control. Results give insight into many other differences between healthy and mobility impaired users' experience of presence in VEs.",
"fno": "06802059",
"keywords": [
"Avatars",
"Virtual Environments",
"Fatigue",
"Visualization",
"Three Dimensional Displays",
"Sociology",
"User Studies",
"Virtual Reality",
"Presence",
"Mobility Impairments",
"Avatar",
"Rehabilitation"
],
"authors": [
{
"affiliation": "The University of Texas at San Antonio",
"fullName": "Rongkai Guo",
"givenName": "Rongkai",
"surname": "Guo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Texas at San Antonio",
"fullName": "Gayani Samaraweera",
"givenName": "Gayani",
"surname": "Samaraweera",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Texas at San Antonio",
"fullName": "John Quarles",
"givenName": "John",
"surname": "Quarles",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-03-01T00:00:00",
"pubType": "proceedings",
"pages": "77-78",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-2871-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06802058",
"articleId": "12OmNAObbAg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06802060",
"articleId": "12OmNqGRG7X",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/pive/2012/1218/0/06229792",
"title": "Differences in presence between healthy users and users with multiple sclerosis",
"doi": null,
"abstractUrl": "/proceedings-article/pive/2012/06229792/12OmNBSSV9H",
"parentPublication": {
"id": "proceedings/pive/2012/1218/0",
"title": "2012 IEEE VR Workshop on Perceptual Illusions in Virtual Environments",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2013/6097/0/06550192",
"title": "Latency and avatars in Virtual Environments and the effects on gait for persons with mobility impairments",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2013/06550192/12OmNBkP3y4",
"parentPublication": {
"id": "proceedings/3dui/2013/6097/0",
"title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549378",
"title": "Latency and avatars in virtual environments and the effects on gait for persons with mobility impairments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549378/12OmNBvkdnR",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ms/2015/7284/0/7284a431",
"title": "Evaluating Crowd Sourced Navigation for the Visually Impaired in a Virtual Environment",
"doi": null,
"abstractUrl": "/proceedings-article/ms/2015/7284a431/12OmNrJ11Ii",
"parentPublication": {
"id": "proceedings/ms/2015/7284/0",
"title": "2015 IEEE International Conference on Mobile Services (MS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892241",
"title": "Prism aftereffects for throwing with a self-avatar in an immersive virtual environment",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892241/12OmNxy4N0w",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504764",
"title": "Visual feedback to improve the accessibility of head-mounted displays for persons with balance impairments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504764/12OmNy6qfPt",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/04/ttg201404644",
"title": "Exchange of Avatars: Toward a Better Perception and Understanding",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg201404644/13rRUB7a1fR",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a347",
"title": "Fully Automatic Blendshape Generation for Stylized Characters",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a347/1MNgXaINwAg",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798222",
"title": "Shared Body by Action Integration of Two Persons: Body Ownership, Sense of Agency and Task Performance",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798222/1cJ0T8PM6qI",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998352",
"title": "Using Facial Animation to Increase the Enfacement Illusion and Avatar Self-Identification",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998352/1hpPCCB7Bte",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tuAeQeDJja",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tuAAF9peSc",
"doi": "10.1109/VR50410.2021.00107",
"title": "The Effect of Feedback on Estimates of Reaching Ability in Virtual Reality",
"normalizedTitle": "The Effect of Feedback on Estimates of Reaching Ability in Virtual Reality",
"abstract": "Immersive virtual environments (VEs) are most useful for training and education when viewers perceive and act accurately within them. Judgments of action capabilities within a VE provide a good measure of perceptual fidelity - the notion of how closely perception and action in the VE match that in the real world - and can also assess how perception for action may be calibrated with visual feedback based on one's own actions. In the current study we tested judgments of action capabilities within a VE for two different reaching behaviors: reaching out and reaching up. Our goal was to assess whether feedback from actual reaching improves judgments and if any recalibration due to feedback differed across reaching behaviors. We first measured participants' actual reaching out and reaching up capabilities so that feedback trials could be scaled to their actual abilities. Participants then completed blocks of alternating perceptual adjustment and feedback trials. In adjustment trials, they adjusted a virtual target to a distance perceived to be just reachable. In feedback trials, they viewed targets that were farther or closer than their actual reach, decided whether the target was reachable, and then reached out to the target to receive visual feedback from a hand-held controller. The first feedback block manipulated the target distance to be 30% over or under actual reach and subsequent blocks decreased the deviation to 20%,10% and 5% of actual reach. We found that for both reaching behaviors, reach was initially overestimated, and then perceptual estimations decreased to become more accurate over feedback blocks. Accuracy in the feedback trials themselves showed that targets just beyond reach were more difficult to judge correctly. This study establishes a straightforward methodology that can be used for calibration of actions in VEs and has implications for applications that depend on accurate reaching within VEs.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Immersive virtual environments (VEs) are most useful for training and education when viewers perceive and act accurately within them. Judgments of action capabilities within a VE provide a good measure of perceptual fidelity - the notion of how closely perception and action in the VE match that in the real world - and can also assess how perception for action may be calibrated with visual feedback based on one's own actions. In the current study we tested judgments of action capabilities within a VE for two different reaching behaviors: reaching out and reaching up. Our goal was to assess whether feedback from actual reaching improves judgments and if any recalibration due to feedback differed across reaching behaviors. We first measured participants' actual reaching out and reaching up capabilities so that feedback trials could be scaled to their actual abilities. Participants then completed blocks of alternating perceptual adjustment and feedback trials. In adjustment trials, they adjusted a virtual target to a distance perceived to be just reachable. In feedback trials, they viewed targets that were farther or closer than their actual reach, decided whether the target was reachable, and then reached out to the target to receive visual feedback from a hand-held controller. The first feedback block manipulated the target distance to be 30% over or under actual reach and subsequent blocks decreased the deviation to 20%,10% and 5% of actual reach. We found that for both reaching behaviors, reach was initially overestimated, and then perceptual estimations decreased to become more accurate over feedback blocks. Accuracy in the feedback trials themselves showed that targets just beyond reach were more difficult to judge correctly. This study establishes a straightforward methodology that can be used for calibration of actions in VEs and has implications for applications that depend on accurate reaching within VEs.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Immersive virtual environments (VEs) are most useful for training and education when viewers perceive and act accurately within them. Judgments of action capabilities within a VE provide a good measure of perceptual fidelity - the notion of how closely perception and action in the VE match that in the real world - and can also assess how perception for action may be calibrated with visual feedback based on one's own actions. In the current study we tested judgments of action capabilities within a VE for two different reaching behaviors: reaching out and reaching up. Our goal was to assess whether feedback from actual reaching improves judgments and if any recalibration due to feedback differed across reaching behaviors. We first measured participants' actual reaching out and reaching up capabilities so that feedback trials could be scaled to their actual abilities. Participants then completed blocks of alternating perceptual adjustment and feedback trials. In adjustment trials, they adjusted a virtual target to a distance perceived to be just reachable. In feedback trials, they viewed targets that were farther or closer than their actual reach, decided whether the target was reachable, and then reached out to the target to receive visual feedback from a hand-held controller. The first feedback block manipulated the target distance to be 30% over or under actual reach and subsequent blocks decreased the deviation to 20%,10% and 5% of actual reach. We found that for both reaching behaviors, reach was initially overestimated, and then perceptual estimations decreased to become more accurate over feedback blocks. Accuracy in the feedback trials themselves showed that targets just beyond reach were more difficult to judge correctly. This study establishes a straightforward methodology that can be used for calibration of actions in VEs and has implications for applications that depend on accurate reaching within VEs.",
"fno": "255600a798",
"keywords": [
"Feedback",
"Human Computer Interaction",
"Virtual Reality",
"Visual Perception",
"Feedback Trials",
"Virtual Target",
"Visual Feedback",
"Feedback Block",
"Immersive Virtual Environments",
"Virtual Reality",
"Reaching Out",
"Judgments Of Action Capabilities",
"Reaching Behaviors",
"Reaching Up",
"Hand Held Controller",
"Training",
"Visualization",
"Three Dimensional Displays",
"Current Measurement",
"Virtual Environments",
"Estimation",
"User Interfaces",
"Human Centered Computing Human Computer Interaction HCI Interaction Paradigms Virtual Reality",
"Human Centered Computing Human Computer Interaction HCI Interaction Paradigms Collaborative Interaction"
],
"authors": [
{
"affiliation": "University of Utah,USA",
"fullName": "Holly C. Gagnon",
"givenName": "Holly C.",
"surname": "Gagnon",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Utah,USA",
"fullName": "Taren Rohovit",
"givenName": "Taren",
"surname": "Rohovit",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Utah,USA",
"fullName": "Hunter Finney",
"givenName": "Hunter",
"surname": "Finney",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Vanderbilt University,USA",
"fullName": "Yu Zhao",
"givenName": "Yu",
"surname": "Zhao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of California,Riverside,USA",
"fullName": "John M. Franchak",
"givenName": "John M.",
"surname": "Franchak",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Utah,USA",
"fullName": "Jeanine K. Stefanucci",
"givenName": "Jeanine K.",
"surname": "Stefanucci",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Vanderbilt University,USA",
"fullName": "Bobby Bodenheimer",
"givenName": "Bobby",
"surname": "Bodenheimer",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Utah,USA",
"fullName": "Sarah H. Creem-Regehr",
"givenName": "Sarah H.",
"surname": "Creem-Regehr",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "798-806",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-1838-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1tuAzW798ly",
"name": "pvr202118380-09417772s1-mm_255600a798.zip",
"size": "138 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvr202118380-09417772s1-mm_255600a798.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "255600a788",
"articleId": "1tuAHZj29Q4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "255600a807",
"articleId": "1tuBhUAhRQs",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2009/3943/0/04811056",
"title": "Effect of Proprioception Training of patient with Hemiplegia by Manipulating Visual Feedback using Virtual Reality: The Preliminary results",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04811056/12OmNB8kHSv",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892292",
"title": "Corrective feedback for depth perception in CAVE-like systems",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892292/12OmNrNh0Ml",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2016/0842/0/07460058",
"title": "Towards a comparative evaluation of visually guided physical reach motions during 3D interactions in real and virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460058/12OmNwHz09w",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08260974",
"title": "Evaluating Remapped Physical Reach for Hand Interactions with Passive Haptics in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08260974/13rRUwkxc5s",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2016/03/07457685",
"title": "Pseudo-Haptic Feedback in Teleoperation",
"doi": null,
"abstractUrl": "/journal/th/2016/03/07457685/13rRUyYjK5o",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a612",
"title": "Investigating The Effect of Direction on The Limits of Haptic Retargeting",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a612/1JrReInK5H2",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089552",
"title": "The Role of Viewing Distance and Feedback on Affordance Judgments in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089552/1jIx8sfGbSw",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2020/7397/0/739700a456",
"title": "Development of Touch Valve UI with pseudo-haptics feedback based on vibration of tablet PC",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2020/739700a456/1tGcjlaxzMs",
"parentPublication": {
"id": "proceedings/iiai-aai/2020/7397/0",
"title": "2020 9th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a575",
"title": "The Effect of the Virtual Object Size on Weight Perception Augmented with Pseudo-Haptic Feedback",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a575/1tnWwW9JGXC",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a817",
"title": "Temporal Availability of Ebbinghaus Illusions on Perceiving and Interacting with 3D Objects in a Contextual Virtual Environment",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a817/1tuAJwtLvNe",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAGepXt",
"title": "2012 IEEE 6th International Symposium on Embedded Multicore SoCs",
"acronym": "mcsoc",
"groupId": "1801959",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNApu5j7",
"doi": "10.1109/MCSoC.2012.17",
"title": "Early Stage Chick Embryonic Heart Outflow Tract Flow Measurement Using High Speed 4D Optical Coherence Tomography",
"normalizedTitle": "Early Stage Chick Embryonic Heart Outflow Tract Flow Measurement Using High Speed 4D Optical Coherence Tomography",
"abstract": "The measurement of blood-plasma absolute velocity distributions with high spatial and temporal resolution in vivo is important for research on early stage embryo heart development. We introduce a novel method to measure absolute velocity of blood flow of chicken embryo (stage HH18) outflow tract based on high speed spectral domain Optical coherence tomography. Firstly, 4D scan was performed on chicken embryo heart in vivo. Secondly, we reconstructed the 4D raw structural image data and to obtain the orientation of outflow tract at maximum expansion, thus we got the flow direction of the blood assuming flow was parallel to the blood vessel. Finally, absolute flow velocity was calculated based on the direction information and Doppler OCT message. Using this method, we compared flow velocity profiles at different positions within the embryo OFT.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The measurement of blood-plasma absolute velocity distributions with high spatial and temporal resolution in vivo is important for research on early stage embryo heart development. We introduce a novel method to measure absolute velocity of blood flow of chicken embryo (stage HH18) outflow tract based on high speed spectral domain Optical coherence tomography. Firstly, 4D scan was performed on chicken embryo heart in vivo. Secondly, we reconstructed the 4D raw structural image data and to obtain the orientation of outflow tract at maximum expansion, thus we got the flow direction of the blood assuming flow was parallel to the blood vessel. Finally, absolute flow velocity was calculated based on the direction information and Doppler OCT message. Using this method, we compared flow velocity profiles at different positions within the embryo OFT.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The measurement of blood-plasma absolute velocity distributions with high spatial and temporal resolution in vivo is important for research on early stage embryo heart development. We introduce a novel method to measure absolute velocity of blood flow of chicken embryo (stage HH18) outflow tract based on high speed spectral domain Optical coherence tomography. Firstly, 4D scan was performed on chicken embryo heart in vivo. Secondly, we reconstructed the 4D raw structural image data and to obtain the orientation of outflow tract at maximum expansion, thus we got the flow direction of the blood assuming flow was parallel to the blood vessel. Finally, absolute flow velocity was calculated based on the direction information and Doppler OCT message. Using this method, we compared flow velocity profiles at different positions within the embryo OFT.",
"fno": "4800a151",
"keywords": [
"Blood Flow",
"Doppler Effect",
"Heart",
"Embryo",
"Optical Imaging",
"Synchronization",
"Image Sequences",
"Doppler Angle",
"Flow",
"Spectral Domain",
"Optical Coherence Tomography"
],
"authors": [
{
"affiliation": null,
"fullName": "Zhenhe Ma",
"givenName": "Zhenhe",
"surname": "Ma",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Tao Xu",
"givenName": "Tao",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Linlin Du",
"givenName": "Linlin",
"surname": "Du",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Zhongdi Chu",
"givenName": "Zhongdi",
"surname": "Chu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jiangtao Lv",
"givenName": "Jiangtao",
"surname": "Lv",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Fengwen Wang",
"givenName": "Fengwen",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "mcsoc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-09-01T00:00:00",
"pubType": "proceedings",
"pages": "151-154",
"year": "2012",
"issn": null,
"isbn": "978-1-4673-2535-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4800a144",
"articleId": "12OmNC2fGze",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4800a155",
"articleId": "12OmNwKoZcR",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cgiv/2016/0811/0/0811a358",
"title": "Hemodynamic Modeling in a Stenosed Internal Carotid Artery",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2016/0811a358/12OmNBSjIVC",
"parentPublication": {
"id": "proceedings/cgiv/2016/0811/0",
"title": "2016 13th International Conference on Computer Graphics, Imaging and Visualization (CGiV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsym/2016/3438/0/07858512",
"title": "Envelope Approximation on Doppler Ultrasound Spectrogram for Estimating Flow Speed in Carotid Artery",
"doi": null,
"abstractUrl": "/proceedings-article/compsym/2016/07858512/12OmNBqv2ac",
"parentPublication": {
"id": "proceedings/compsym/2016/3438/0",
"title": "2016 International Computer Symposium (ICS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bmei/2008/3118/2/3118b339",
"title": "A Simulation Model for Doppler Ultrasound Signals from Pulsatile Blood Flow in Stenosed Vessels",
"doi": null,
"abstractUrl": "/proceedings-article/bmei/2008/3118b339/12OmNvq5jz8",
"parentPublication": {
"id": "proceedings/bmei/2008/3118/2",
"title": "BioMedical Engineering and Informatics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icbeb/2012/4706/0/4706a939",
"title": "The Research on Doppler Ultrasonic Blood Flow Signals under Periodically Pulsatile Flow Based on STFT",
"doi": null,
"abstractUrl": "/proceedings-article/icbeb/2012/4706a939/12OmNzYwc9a",
"parentPublication": {
"id": "proceedings/icbeb/2012/4706/0",
"title": "Biomedical Engineering and Biotechnology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sensordevices/2010/4094/0/4094a236",
"title": "Influence of Alcohol Consumption on Blood Flow as Detected Using a Micro Integrated Laser Doppler Blood Flowmeter",
"doi": null,
"abstractUrl": "/proceedings-article/sensordevices/2010/4094a236/12OmNzkMlRh",
"parentPublication": {
"id": "proceedings/sensordevices/2010/4094/0",
"title": "Sensor Device Technologies and Applications, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/12/ttg2011122153",
"title": "Interactive Virtual Probing of 4D MRI Blood-Flow",
"doi": null,
"abstractUrl": "/journal/tg/2011/12/ttg2011122153/13rRUNvyatf",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2018/7568/0/08642768",
"title": "Three-Dimensional Ultrasound Doppler Simulation for Vascular Bifurcation Model",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2018/08642768/17QjJcpcgyk",
"parentPublication": {
"id": "proceedings/isspit/2018/7568/0",
"title": "2018 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2018/7568/0/08642656",
"title": "Three-dimensional Ultrasound Doppler Simulation for Vascular Bifurcation Model",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2018/08642656/17QjJdjY2zf",
"parentPublication": {
"id": "proceedings/isspit/2018/7568/0",
"title": "2018 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2018/7568/0/08642692",
"title": "Three-Dimensional Ultrasound Doppler Simulation for Vascular Bifurcation Model",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2018/08642692/17QjJdt3cJr",
"parentPublication": {
"id": "proceedings/isspit/2018/7568/0",
"title": "2018 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigdataservice/2019/0059/0/005900a279",
"title": "Modeling of Vast Particle Movements in Microvasculature System for Blood Flow Measurement",
"doi": null,
"abstractUrl": "/proceedings-article/bigdataservice/2019/005900a279/1dDLWoDy9iw",
"parentPublication": {
"id": "proceedings/bigdataservice/2019/0059/0",
"title": "2019 IEEE Fifth International Conference on Big Data Computing Service and Applications (BigDataService)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNy4IF3s",
"title": "2018 IEEE 18th International Conference on Advanced Learning Technologies (ICALT)",
"acronym": "icalt",
"groupId": "1000009",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxiKsau",
"doi": "10.1109/ICALT.2018.00095",
"title": "Towards Design and Operationalization of Pedagogical Situations in the VRLEs",
"normalizedTitle": "Towards Design and Operationalization of Pedagogical Situations in the VRLEs",
"abstract": "Virtual reality (VR) technology has been applied in many sectors. Recent technological innovations have facilitated the access to virtual reality for anyone. VR offers new experiences to users that will make it possible to break the boundaries of formal education. But the design of educational environments named Virtual Reality Learning Environments (VRLEs) exploiting this technology is complex. We note that, because the pedagogical scenario have to be designed in the early time of VRLEs' design, it offers limited issues to teachers to adapt to new situations. We aim in this work at studying and proposing solutions to help trainers to design and spread their educational scenarios in the VRLEs.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual reality (VR) technology has been applied in many sectors. Recent technological innovations have facilitated the access to virtual reality for anyone. VR offers new experiences to users that will make it possible to break the boundaries of formal education. But the design of educational environments named Virtual Reality Learning Environments (VRLEs) exploiting this technology is complex. We note that, because the pedagogical scenario have to be designed in the early time of VRLEs' design, it offers limited issues to teachers to adapt to new situations. We aim in this work at studying and proposing solutions to help trainers to design and spread their educational scenarios in the VRLEs.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual reality (VR) technology has been applied in many sectors. Recent technological innovations have facilitated the access to virtual reality for anyone. VR offers new experiences to users that will make it possible to break the boundaries of formal education. But the design of educational environments named Virtual Reality Learning Environments (VRLEs) exploiting this technology is complex. We note that, because the pedagogical scenario have to be designed in the early time of VRLEs' design, it offers limited issues to teachers to adapt to new situations. We aim in this work at studying and proposing solutions to help trainers to design and spread their educational scenarios in the VRLEs.",
"fno": "604901a400",
"keywords": [
"Computer Aided Instruction",
"Virtual Reality",
"Pedagogical Situations",
"Formal Education",
"Educational Environments",
"Pedagogical Scenario",
"Virtual Reality Learning Environments",
"VRLE Design",
"Solid Modeling",
"Education",
"Adaptation Models",
"Virtual Environments",
"Tools",
"Semantics",
"Virtual Learning Environments",
"Learning Design",
"Pedagogical Scenario",
"Educational Simulation"
],
"authors": [
{
"affiliation": null,
"fullName": "Oussema Mahdi",
"givenName": "Oussema",
"surname": "Mahdi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Lahcen Oubahssi",
"givenName": "Lahcen",
"surname": "Oubahssi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Claudine Piau-Toffolon",
"givenName": "Claudine",
"surname": "Piau-Toffolon",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Sébastien Iksal",
"givenName": "Sébastien",
"surname": "Iksal",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icalt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-07-01T00:00:00",
"pubType": "proceedings",
"pages": "400-402",
"year": "2018",
"issn": "2161-377X",
"isbn": "978-1-5386-6049-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "604901a395",
"articleId": "12OmNBKEyqp",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "604901a403",
"articleId": "12OmNAle6R6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icime/2018/7616/0/761600a006",
"title": "Affordances of Virtual Reality for Collaborative Learning",
"doi": null,
"abstractUrl": "/proceedings-article/icime/2018/761600a006/17D45WZZ7Bd",
"parentPublication": {
"id": "proceedings/icime/2018/7616/0",
"title": "2018 International Joint Conference on Information, Media and Engineering (ICIME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isaiee/2021/7874/0/787400a204",
"title": "Teaching Mode of Art Design Wisdom Course Based on VR Technology",
"doi": null,
"abstractUrl": "/proceedings-article/isaiee/2021/787400a204/1BByh8CxzR6",
"parentPublication": {
"id": "proceedings/isaiee/2021/7874/0",
"title": "2021 International Symposium on Advances in Informatics, Electronics and Education (ISAIEE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itei/2021/8050/0/805000a232",
"title": "VR technology applied to traditional dance",
"doi": null,
"abstractUrl": "/proceedings-article/itei/2021/805000a232/1CzeG2lZvEI",
"parentPublication": {
"id": "proceedings/itei/2021/8050/0",
"title": "2021 3rd International Conference on Internet Technology and Educational Informization (ITEI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2022/9519/0/951900a344",
"title": "Teaching with a companion: the case of gravity",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2022/951900a344/1FUU9ygYQNO",
"parentPublication": {
"id": "proceedings/icalt/2022/9519/0",
"title": "2022 International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cste/2022/8188/0/818800a006",
"title": "Requirements Analysis and a Design Model for Educational VR Prototyping",
"doi": null,
"abstractUrl": "/proceedings-article/cste/2022/818800a006/1J7W3e34rLy",
"parentPublication": {
"id": "proceedings/cste/2022/8188/0",
"title": "2022 4th International Conference on Computer Science and Technologies in Education (CSTE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccsm/2022/5486/0/548600a076",
"title": "Virtual Reality In Education: Structural Design Of An Adaptable Virtual Reality System",
"doi": null,
"abstractUrl": "/proceedings-article/iccsm/2022/548600a076/1JeF70S70aI",
"parentPublication": {
"id": "proceedings/iccsm/2022/5486/0",
"title": "2022 6th International Conference on Computer, Software and Modeling (ICCSM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icenit/2022/6307/0/630700a093",
"title": "Research and Application of English Learning Games Based on VR technology",
"doi": null,
"abstractUrl": "/proceedings-article/icenit/2022/630700a093/1KCSKpRJIJy",
"parentPublication": {
"id": "proceedings/icenit/2022/6307/0",
"title": "2022 International Conference on Education, Network and Information Technology (ICENIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2019/3485/0/348500a344",
"title": "Assistance to Scenarisation of VR-Oriented Pedagogical Activities: Models and Tools",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2019/348500a344/1cYi3fu89m8",
"parentPublication": {
"id": "proceedings/icalt/2019/3485/2161-377X",
"title": "2019 IEEE 19th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvris/2019/5050/0/505000a069",
"title": "The Application of Stereo Image Generation and Composition Algorithms in Desktop-Based Virtual Reality Teaching System",
"doi": null,
"abstractUrl": "/proceedings-article/icvris/2019/505000a069/1fHkayVfcqI",
"parentPublication": {
"id": "proceedings/icvris/2019/5050/0",
"title": "2019 International Conference on Virtual Reality and Intelligent Systems (ICVRIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cipae/2020/8223/0/822300a035",
"title": "Application and Research of Virtual Reality Technology Based on Big Data in College Teaching Field",
"doi": null,
"abstractUrl": "/proceedings-article/cipae/2020/822300a035/1rSRiSSRHGM",
"parentPublication": {
"id": "proceedings/cipae/2020/8223/0",
"title": "2020 International Conference on Computers, Information Processing and Advanced Education (CIPAE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1CJcAaH6aYg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJcZCgNWla",
"doi": "10.1109/VRW55335.2022.00305",
"title": "Heart-In-Hand, swapping point of view for immersive navigation in medical cardiology",
"normalizedTitle": "Heart-In-Hand, swapping point of view for immersive navigation in medical cardiology",
"abstract": "This work shows an interaction technique that allows the user to swap between egocentric and exocentric points of view while performing an anatomic navigation task inside the human heart. To achieve this, we propose a technique that furnishes the user with a natural interaction supported by two components. The first is a tangible heart representation that the user may manipulate with their hands in an exocentric view; the second is a set of manual gestures that provide locomotion actions in an egocentric view and allow the user to swap between the points of view. Two classic 3D interaction techniques inspire this work: i) World In Miniature and ii) Voodoo Dolls, extending them with natural interaction components. The preliminary tests showed that the proposed approach allows users to take advantage of an immersive first-person viewer of the heart using the third-person view to get a global point of view without missing the anatomic location inside the heart.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This work shows an interaction technique that allows the user to swap between egocentric and exocentric points of view while performing an anatomic navigation task inside the human heart. To achieve this, we propose a technique that furnishes the user with a natural interaction supported by two components. The first is a tangible heart representation that the user may manipulate with their hands in an exocentric view; the second is a set of manual gestures that provide locomotion actions in an egocentric view and allow the user to swap between the points of view. Two classic 3D interaction techniques inspire this work: i) World In Miniature and ii) Voodoo Dolls, extending them with natural interaction components. The preliminary tests showed that the proposed approach allows users to take advantage of an immersive first-person viewer of the heart using the third-person view to get a global point of view without missing the anatomic location inside the heart.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This work shows an interaction technique that allows the user to swap between egocentric and exocentric points of view while performing an anatomic navigation task inside the human heart. To achieve this, we propose a technique that furnishes the user with a natural interaction supported by two components. The first is a tangible heart representation that the user may manipulate with their hands in an exocentric view; the second is a set of manual gestures that provide locomotion actions in an egocentric view and allow the user to swap between the points of view. Two classic 3D interaction techniques inspire this work: i) World In Miniature and ii) Voodoo Dolls, extending them with natural interaction components. The preliminary tests showed that the proposed approach allows users to take advantage of an immersive first-person viewer of the heart using the third-person view to get a global point of view without missing the anatomic location inside the heart.",
"fno": "840200a908",
"keywords": [
"Cardiology",
"Data Visualisation",
"Interactive Systems",
"Navigation",
"User Interfaces",
"Virtual Reality",
"Heart In Hand",
"Immersive Navigation",
"Medical Cardiology",
"Anatomic Navigation Task",
"Human Heart",
"Tangible Heart Representation",
"Exocentric View",
"Egocentric View",
"3 D Interaction Techniques",
"Natural Interaction Components",
"Immersive First Person Viewer",
"Third Person View",
"Heart",
"Three Dimensional Displays",
"Navigation",
"Conferences",
"Virtual Reality",
"Manuals",
"User Interfaces",
"Human Centered Computing X 2014 Interaction Paradigms X 2014 Mixed Augmented Reality"
],
"authors": [
{
"affiliation": "Multimedia Research Group, Universidad Militar Nueva Granada,Colombia",
"fullName": "Carlos J. Latorre-Rojas",
"givenName": "Carlos J.",
"surname": "Latorre-Rojas",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Multimedia Research Group, Universidad Militar Nueva Granada,Colombia",
"fullName": "Alexander Rozo-Torres",
"givenName": "Alexander",
"surname": "Rozo-Torres",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Multimedia Research Group, Universidad Militar Nueva Granada,Colombia",
"fullName": "Laura Cortés-Rico",
"givenName": "Laura",
"surname": "Cortés-Rico",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Multimedia Research Group, Universidad Militar Nueva Granada,Colombia",
"fullName": "Wilson J. Sarmiento",
"givenName": "Wilson J.",
"surname": "Sarmiento",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "908-909",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8402-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1CJcYZQ90vC",
"name": "pvrw202284020-09757454s1-mm_840200a908.zip",
"size": "105 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvrw202284020-09757454s1-mm_840200a908.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "840200a906",
"articleId": "1CJcIGUC37O",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "840200a910",
"articleId": "1CJcWqj0HZu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2016/0836/0/07504771",
"title": "Comparison of mobile touch interfaces for object identification and troubleshooting tasks in augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504771/12OmNAle71v",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2016/4149/0/4149a095",
"title": "A Study in Virtual Navigation Cues for Forklift Operators",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2016/4149a095/12OmNwlqhJO",
"parentPublication": {
"id": "proceedings/svr/2016/4149/0",
"title": "2016 XVIII Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2010/6821/0/05444626",
"title": "Haptic noise cancellation: Restoring force perception in robotically-assisted beating heart surgery",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2010/05444626/12OmNxuo0kx",
"parentPublication": {
"id": "proceedings/haptics/2010/6821/0",
"title": "2010 IEEE Haptics Symposium (Formerly known as Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/imis/2015/8873/0/8873a447",
"title": "Simplified 3D Hologram Heart Activity Monitoring Using a Smartphone",
"doi": null,
"abstractUrl": "/proceedings-article/imis/2015/8873a447/12OmNzxgHGo",
"parentPublication": {
"id": "proceedings/imis/2015/8873/0",
"title": "2015 9th International Conference on Innovative Mobile and Internet Services in Ubiquitous Computing (IMIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2018/9269/0/926900a241",
"title": "Augmented Reality Simulation of Cardiac Circulation Using APPLearn (Heart)",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2018/926900a241/17D45Wt3ExJ",
"parentPublication": {
"id": "proceedings/aivr/2018/9269/0",
"title": "2018 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699244",
"title": "Augmenting a Cardiology-Patient Doctor-Dialogue Through Integrated Heartbeat-Activated Holographic Display",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699244/19F1QjoSiZ2",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a920",
"title": "Clean the Ocean: An Immersive VR Experience Proposing New Modifications to Go-Go and WiM Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a920/1CJettpbljW",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2022/6819/0/09995496",
"title": "Genome-wide compendium of super-long noncoding RNAs during mouse heart development",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2022/09995496/1JC2CgRRaI8",
"parentPublication": {
"id": "proceedings/bibm/2022/6819/0",
"title": "2022 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797759",
"title": "Effects of Voluntary Heart Rate Control on User Engagement in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797759/1cJ1bgxatLG",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2021/4261/0/09635450",
"title": "Brain-Heart Electromechanical Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2021/09635450/1zmvpkATFHG",
"parentPublication": {
"id": "proceedings/bibe/2021/4261/0",
"title": "2021 IEEE 21st International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1CJcAaH6aYg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJdghxxotO",
"doi": "10.1109/VRW55335.2022.00045",
"title": "A Dataset and Methodology for Self-Efficacy Feeling Prediction During Industry 4.0 VR Activity",
"normalizedTitle": "A Dataset and Methodology for Self-Efficacy Feeling Prediction During Industry 4.0 VR Activity",
"abstract": "Virtual Reality Learning Environments (VRLE) have advantages in training contexts. However, VRLE lacks of User-adaptive system which adapt scenario to the user's state. As there is a lack of multi-sensor dataset, this paper presents the IVRASED dataset collected in an industrial VRLE with the following sensors: electroencephalogram (EEG), eye-tracking (ET), galvanic skin response (GSR) and electrocardiogram (ECG). Classification of the user's state is performed with a deep learning architecture and the results show an accuracy of 77.8% for the best sensors combination.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual Reality Learning Environments (VRLE) have advantages in training contexts. However, VRLE lacks of User-adaptive system which adapt scenario to the user's state. As there is a lack of multi-sensor dataset, this paper presents the IVRASED dataset collected in an industrial VRLE with the following sensors: electroencephalogram (EEG), eye-tracking (ET), galvanic skin response (GSR) and electrocardiogram (ECG). Classification of the user's state is performed with a deep learning architecture and the results show an accuracy of 77.8% for the best sensors combination.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual Reality Learning Environments (VRLE) have advantages in training contexts. However, VRLE lacks of User-adaptive system which adapt scenario to the user's state. As there is a lack of multi-sensor dataset, this paper presents the IVRASED dataset collected in an industrial VRLE with the following sensors: electroencephalogram (EEG), eye-tracking (ET), galvanic skin response (GSR) and electrocardiogram (ECG). Classification of the user's state is performed with a deep learning architecture and the results show an accuracy of 77.8% for the best sensors combination.",
"fno": "840200a176",
"keywords": [
"Computer Aided Instruction",
"Electrocardiography",
"Electroencephalography",
"Learning Artificial Intelligence",
"Medical Signal Processing",
"Skin",
"Virtual Reality",
"VR Activity",
"Virtual Reality Learning Environments",
"Training Contexts",
"User Adaptive System",
"Multisensor Dataset",
"IVRASED Dataset",
"Industrial VRLE",
"Electrocardiogram",
"Deep Learning Architecture",
"Sensors Combination",
"Self Efficacy Feeling Prediction",
"Deep Learning",
"Solid Modeling",
"Conferences",
"Virtual Reality",
"Electrocardiography",
"Sensor Phenomena And Characterization",
"Brain Modeling",
"VRLE",
"Dataset",
"Industry 4 0",
"Sensor",
"Self Efficacy",
"EEG",
"ECG",
"GSR",
"Eye Tracking",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Paradigms",
"Virtual Reality",
"Computing Methodologies X 2014 Machine Learning X 2014 Learning Paradigms X 2014 Supervised Learning X 2014 Supervised Learning By Classification",
"Applied Computing X 2014 Education X 2014 Interactive Learning Environments"
],
"authors": [
{
"affiliation": "LINEACT CESI,Rouen,France",
"fullName": "Thibaud Bounhar",
"givenName": "Thibaud",
"surname": "Bounhar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "IRSEEM / ESIGELEC,Rouen,France",
"fullName": "Zaher Yamak",
"givenName": "Zaher",
"surname": "Yamak",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "LINEACT CESI,Rouen,France",
"fullName": "Vincent Havard",
"givenName": "Vincent",
"surname": "Havard",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "LINEACT CESI,Rouen,France",
"fullName": "David Baudry",
"givenName": "David",
"surname": "Baudry",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "176-182",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8402-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "840200a169",
"articleId": "1CJflJEIyKk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "840200a183",
"articleId": "1CJcEwk2ylO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vhcie/2016/0829/0/07563568",
"title": "Feeling crowded yet?: crowd simulations for VR",
"doi": null,
"abstractUrl": "/proceedings-article/vhcie/2016/07563568/12OmNy2Jt9L",
"parentPublication": {
"id": "proceedings/vhcie/2016/0829/0",
"title": "2016 IEEE Virtual Humans and Crowds for Immersive Environments (VHCIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2021/02/08554112",
"title": "AMIGOS: A Dataset for Affect, Personality and Mood Research on Individuals and Groups",
"doi": null,
"abstractUrl": "/journal/ta/2021/02/08554112/17D45XoXP5b",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigdatasecurity-hpsc-ids/2022/8069/0/806900a180",
"title": "Toward a BCI-Based Personalized Recommender System Using Deep Learning",
"doi": null,
"abstractUrl": "/proceedings-article/bigdatasecurity-hpsc-ids/2022/806900a180/1EykIOXY3Pa",
"parentPublication": {
"id": "proceedings/bigdatasecurity-hpsc-ids/2022/8069/0",
"title": "2022 IEEE 8th Intl Conference on Big Data Security on Cloud (BigDataSecurity), IEEE Intl Conference on High Performance and Smart Computing, (HPSC) and IEEE Intl Conference on Intelligent Data and Security (IDS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscc/2022/9792/0/09913012",
"title": "Towards Privacy-Preserving Neural Architecture Search",
"doi": null,
"abstractUrl": "/proceedings-article/iscc/2022/09913012/1HBK3j5F5PG",
"parentPublication": {
"id": "proceedings/iscc/2022/9792/0",
"title": "2022 IEEE Symposium on Computers and Communications (ISCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/oj/2022/01/09996123",
"title": "Optimized UAV Trajectory and Transceiver Design for Over-the-Air Computation Systems",
"doi": null,
"abstractUrl": "/journal/oj/2022/01/09996123/1JilRiqOb16",
"parentPublication": {
"id": "trans/oj",
"title": "IEEE Open Journal of the Computer Society",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hdis/2022/9144/0/09991677",
"title": "Study on the Influence of Complex Terrain on Optimal Sensor Layout",
"doi": null,
"abstractUrl": "/proceedings-article/hdis/2022/09991677/1JwPYEHDjEc",
"parentPublication": {
"id": "proceedings/hdis/2022/9144/0",
"title": "2022 International Conference on High Performance Big Data and Intelligent Systems (HDIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2022/7172/0/717200a159",
"title": "A smartphone app to collect emotion-labeled signals in the wild using a body sensor network",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2022/717200a159/1KaHKaflLy0",
"parentPublication": {
"id": "proceedings/ism/2022/7172/0",
"title": "2022 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aciiw/2022/5490/0/10086000",
"title": "Preliminary Study on the Transition of Bio-emotion using Aroma Stimuli",
"doi": null,
"abstractUrl": "/proceedings-article/aciiw/2022/10086000/1M665PIA7L2",
"parentPublication": {
"id": "proceedings/aciiw/2022/5490/0",
"title": "2022 10th International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a286",
"title": "Animation Fidelity in Self-Avatars: Impact on User Performance and Sense of Agency",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a286/1MNgTwnoSUE",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2023/05/09645232",
"title": "Towards Nonintrusive and Secure Mobile Two-Factor Authentication on Wearables",
"doi": null,
"abstractUrl": "/journal/tm/2023/05/09645232/1zc6yZ2aX1C",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1DSyxnQQtaw",
"title": "2021 International Conference on Computing Sciences (ICCS)",
"acronym": "iccs",
"groupId": "1802098",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1DSyzDIy0Tu",
"doi": "10.1109/ICCS54944.2021.00049",
"title": "Machine Learning-Based Heart Patient Scanning, Visualization, and Monitoring",
"normalizedTitle": "Machine Learning-Based Heart Patient Scanning, Visualization, and Monitoring",
"abstract": "Heart diseases leading most causes of death globally according to World Health Organization cardiovascular or all heart related disease are responsible for 17.9 million death every year. An early detection and diagnosis of the disease is very important and maybe it's the key of cure. The major challenge is to predict the disease in early stages therefor most of scientists and researches focus on Machine learning techniques which have the capability of detection with accurate result for large and complex data and apply those techniques to help in health care. The purpose of this work is to detect heart diseases at early stage and avoid consequences by implementing different Machine Learning Algorithm for example, KNN Decision Tree (DT), Logistic Regression, SVM, Random Forest (RF), and Naïve Bayes (NB).",
"abstracts": [
{
"abstractType": "Regular",
"content": "Heart diseases leading most causes of death globally according to World Health Organization cardiovascular or all heart related disease are responsible for 17.9 million death every year. An early detection and diagnosis of the disease is very important and maybe it's the key of cure. The major challenge is to predict the disease in early stages therefor most of scientists and researches focus on Machine learning techniques which have the capability of detection with accurate result for large and complex data and apply those techniques to help in health care. The purpose of this work is to detect heart diseases at early stage and avoid consequences by implementing different Machine Learning Algorithm for example, KNN Decision Tree (DT), Logistic Regression, SVM, Random Forest (RF), and Naïve Bayes (NB).",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Heart diseases leading most causes of death globally according to World Health Organization cardiovascular or all heart related disease are responsible for 17.9 million death every year. An early detection and diagnosis of the disease is very important and maybe it's the key of cure. The major challenge is to predict the disease in early stages therefor most of scientists and researches focus on Machine learning techniques which have the capability of detection with accurate result for large and complex data and apply those techniques to help in health care. The purpose of this work is to detect heart diseases at early stage and avoid consequences by implementing different Machine Learning Algorithm for example, KNN Decision Tree (DT), Logistic Regression, SVM, Random Forest (RF), and Naïve Bayes (NB).",
"fno": "944500a212",
"keywords": [
"Cardiovascular System",
"Decision Trees",
"Diseases",
"Health Care",
"Learning Artificial Intelligence",
"Patient Diagnosis",
"Patient Monitoring",
"Regression Analysis",
"Support Vector Machines",
"Machine Learning Techniques",
"Health Care",
"Heart Diseases",
"World Health Organization",
"Machine Learning Based Heart Patient Scanning",
"Heart",
"Support Vector Machines",
"Machine Learning Algorithms",
"Organizations",
"Medical Services",
"Prediction Algorithms",
"Random Forests",
"Machine Learning",
"Cardiovascular Disease",
"Decision Tree",
"Heart Disease Prediction"
],
"authors": [
{
"affiliation": "Lovely Professional University Jalandhar,Department of Machine Learning and Arterial intelligence,Phagwara,Punjab,India",
"fullName": "Ahmed Al Ahdal",
"givenName": "Ahmed Al",
"surname": "Ahdal",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Lovely Professional University Jalandhar,Department of Computer Science and Engineering,Phagwara,Punjab,India",
"fullName": "Deepak Prashar",
"givenName": "Deepak",
"surname": "Prashar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Lovely Professional University Jalandhar,Department of Computer Science and Engineering,Phagwara,Punjab,India",
"fullName": "Manik Rakhra",
"givenName": "Manik",
"surname": "Rakhra",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Lovely Professional University Jalandhar,Department of Computer Science and Engineering,Phagwara,Punjab,India",
"fullName": "Ankita Wadhawan",
"givenName": "Ankita",
"surname": "Wadhawan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccs",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-12-01T00:00:00",
"pubType": "proceedings",
"pages": "212-215",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-9445-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "944500a206",
"articleId": "1DSyxxWaMIE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "944500a216",
"articleId": "1DSyygi8ERG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ichi/2022/6845/0/684500a357",
"title": "Electro-Mechanical Data Fusion for Heart Health Monitoring",
"doi": null,
"abstractUrl": "/proceedings-article/ichi/2022/684500a357/1GvdBnIfCKs",
"parentPublication": {
"id": "proceedings/ichi/2022/6845/0",
"title": "2022 IEEE 10th International Conference on Healthcare Informatics (ICHI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/smartiot/2022/7952/0/795200a020",
"title": "SmartCare: Detecting Heart Failure and Diabetes Using Smartwatch",
"doi": null,
"abstractUrl": "/proceedings-article/smartiot/2022/795200a020/1Gvdn85OviE",
"parentPublication": {
"id": "proceedings/smartiot/2022/7952/0",
"title": "2022 IEEE International Conference on Smart Internet of Things (SmartIoT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aemcse/2022/8474/0/847400a315",
"title": "Research of Heart Disease Prediction Based on Machine Learning",
"doi": null,
"abstractUrl": "/proceedings-article/aemcse/2022/847400a315/1IlO8E1UhTq",
"parentPublication": {
"id": "proceedings/aemcse/2022/8474/0",
"title": "2022 5th International Conference on Advanced Electronic Materials, Computers and Software Engineering (AEMCSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mlcss/2022/5493/0/549300a028",
"title": "Heart Disease Prediction Using Feature Selection and Machine Learning Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/mlcss/2022/549300a028/1LSOXa8rSNy",
"parentPublication": {
"id": "proceedings/mlcss/2022/5493/0",
"title": "2022 International Conference on Machine Learning, Computer Systems and Security (MLCSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2019/0858/0/09005488",
"title": "Towards comparing and using Machine Learning techniques for detecting and predicting Heart Attack and Diseases",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2019/09005488/1hJsaaRMGlO",
"parentPublication": {
"id": "proceedings/big-data/2019/0858/0",
"title": "2019 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icoin/2021/9101/0/09333884",
"title": "DMHZ: A Decision Support System Based on Machine Computational Design for Heart Disease Diagnosis Using Z-Alizadeh Sani Dataset",
"doi": null,
"abstractUrl": "/proceedings-article/icoin/2021/09333884/1qTrUusPqjm",
"parentPublication": {
"id": "proceedings/icoin/2021/9101/0",
"title": "2021 International Conference on Information Networking (ICOIN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2020/6251/0/09378232",
"title": "Automatic Multimodal Heart Disease Classification using Phonocardiogram Signal",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2020/09378232/1s64bIaNpCg",
"parentPublication": {
"id": "proceedings/big-data/2020/6251/0",
"title": "2020 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itca/2020/0378/0/037800a630",
"title": "Heart disease prediction based on random forest and LSTM",
"doi": null,
"abstractUrl": "/proceedings-article/itca/2020/037800a630/1tpBg5iPcL6",
"parentPublication": {
"id": "proceedings/itca/2020/0378/0",
"title": "2020 2nd International Conference on Information Technology and Computer Application (ITCA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsecs-icocsim/2021/1407/0/140700a620",
"title": "Accuracy and performance analysis for classification algorithms based on biomedical datasets",
"doi": null,
"abstractUrl": "/proceedings-article/icsecs-icocsim/2021/140700a620/1wYlrF3E2wE",
"parentPublication": {
"id": "proceedings/icsecs-icocsim/2021/1407/0",
"title": "2021 International Conference on Software Engineering & Computer Systems and 4th International Conference on Computational Science and Information Management (ICSECS-ICOCSIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsecs-icocsim/2021/1407/0/140700a232",
"title": "Machine-Learning-Based Prediction Models of Coronary Heart Disease Using Naïve Bayes and Random Forest Algorithms",
"doi": null,
"abstractUrl": "/proceedings-article/icsecs-icocsim/2021/140700a232/1wYlzM3HkeQ",
"parentPublication": {
"id": "proceedings/icsecs-icocsim/2021/1407/0",
"title": "2021 International Conference on Software Engineering & Computer Systems and 4th International Conference on Computational Science and Information Management (ICSECS-ICOCSIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
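A note on the ICCS record above: its abstract names six classifiers (KNN, Decision Tree, Logistic Regression, SVM, Random Forest, Naïve Bayes) but gives no implementation detail. The sketch below shows one plausible way to run that kind of comparison with scikit-learn; it is an illustration only, and the `heart.csv` file and its `target` column are hypothetical placeholders, not the paper's dataset.

```python
# Minimal sketch of the six-classifier comparison the ICCS abstract lists.
# The CSV path and the "target" column are hypothetical placeholders.
import pandas as pd
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB

df = pd.read_csv("heart.csv")                      # hypothetical dataset
X, y = df.drop(columns="target"), df["target"]

models = {
    "KNN": KNeighborsClassifier(),
    "Decision Tree": DecisionTreeClassifier(random_state=0),
    "Logistic Regression": LogisticRegression(max_iter=1000),
    "SVM": SVC(),
    "Random Forest": RandomForestClassifier(random_state=0),
    "Naive Bayes": GaussianNB(),
}
for name, model in models.items():
    # Scale features inside the pipeline so the distance/margin-based
    # models (KNN, SVM) are not dominated by large-valued columns.
    pipe = make_pipeline(StandardScaler(), model)
    scores = cross_val_score(pipe, X, y, cv=5)
    print(f"{name}: {scores.mean():.3f} +/- {scores.std():.3f}")
```

Cross-validated accuracy is one reasonable yardstick for such a comparison; the tree ensembles ignore the scaling step, so keeping it in every pipeline is harmless for them.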
{
"proceeding": {
"id": "1FUU5pAuu8E",
"title": "2022 International Conference on Advanced Learning Technologies (ICALT)",
"acronym": "icalt",
"groupId": "1000009",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1FUU9q5JFmw",
"doi": "10.1109/ICALT55010.2022.00105",
"title": "Mechanism to Capture Learners’ Interactions in Virtual Reality Learning Environment",
"normalizedTitle": "Mechanism to Capture Learners’ Interactions in Virtual Reality Learning Environment",
"abstract": "Virtual Reality (VR) has the potential to improve learning in the education domain due to its characteristics such as multi-sensory stimuli, immersion, interaction, first-person perspectives, etc. In spite of these advantages, the literature analysis carried out has revealed that there is no literature that explored the learning processes happening in a VR environment. The learning processes can be analyzed using the data collected from the interaction behavior happening in a VR learning environment (VRLE). The existing data collection mechanisms in VR such as questionnaires, surveys, physiological sensors, and human observers do not provide data related to the interaction behavior of the learners happening in VR head-mounted displays (HMD). Hence, in this paper, we discuss a mechanism developed to log the interaction data happening in VRLE automatically in real-time and store them in a locally accessible location. The logged interaction data can be further processed to extract features and study the learning processes in VRLE.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual Reality (VR) has the potential to improve learning in the education domain due to its characteristics such as multi-sensory stimuli, immersion, interaction, first-person perspectives, etc. In spite of these advantages, the literature analysis carried out has revealed that there is no literature that explored the learning processes happening in a VR environment. The learning processes can be analyzed using the data collected from the interaction behavior happening in a VR learning environment (VRLE). The existing data collection mechanisms in VR such as questionnaires, surveys, physiological sensors, and human observers do not provide data related to the interaction behavior of the learners happening in VR head-mounted displays (HMD). Hence, in this paper, we discuss a mechanism developed to log the interaction data happening in VRLE automatically in real-time and store them in a locally accessible location. The logged interaction data can be further processed to extract features and study the learning processes in VRLE.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual Reality (VR) has the potential to improve learning in the education domain due to its characteristics such as multi-sensory stimuli, immersion, interaction, first-person perspectives, etc. In spite of these advantages, the literature analysis carried out has revealed that there is no literature that explored the learning processes happening in a VR environment. The learning processes can be analyzed using the data collected from the interaction behavior happening in a VR learning environment (VRLE). The existing data collection mechanisms in VR such as questionnaires, surveys, physiological sensors, and human observers do not provide data related to the interaction behavior of the learners happening in VR head-mounted displays (HMD). Hence, in this paper, we discuss a mechanism developed to log the interaction data happening in VRLE automatically in real-time and store them in a locally accessible location. The logged interaction data can be further processed to extract features and study the learning processes in VRLE.",
"fno": "951900a335",
"keywords": [
"Computer Aided Instruction",
"Helmet Mounted Displays",
"Virtual Reality",
"Interaction Behavior",
"VR Head Mounted Displays",
"Interaction Data",
"Learning Processes",
"Capture Learners",
"Virtual Reality Learning Environment",
"Education Domain",
"Multisensory Stimuli",
"First Person Perspectives",
"Literature Analysis",
"VR Environment",
"VR Learning Environment",
"Existing Data Collection Mechanisms",
"Solid Modeling",
"Virtual Reality",
"Resists",
"Feature Extraction",
"Real Time Systems",
"Physiology",
"Behavioral Sciences",
"Virtual Reality",
"Learning Process",
"Behavior",
"Interaction Data"
],
"authors": [
{
"affiliation": "Indian Institute of Technology Bombay,Interdisciplinary Programme in Educational Technology,Mumbai,India",
"fullName": "Antony Prakash",
"givenName": "Antony",
"surname": "Prakash",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Indian Institute of Technology Bombay,Interdisciplinary Programme in Educational Technology,Mumbai,India",
"fullName": "Ramkumar Rajendran",
"givenName": "Ramkumar",
"surname": "Rajendran",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icalt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-07-01T00:00:00",
"pubType": "proceedings",
"pages": "335-337",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-9519-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "951900a330",
"articleId": "1FUU6YclSW4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "951900a338",
"articleId": "1FUUdkEP6SI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "trans/tg/2018/02/07833028",
"title": "Augmented Reality versus Virtual Reality for 3D Object Manipulation",
"doi": null,
"abstractUrl": "/journal/tg/2018/02/07833028/13rRUwInvsX",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2018/7447/0/744701a946",
"title": "Exploring the Effects of Multimedia Design in a Life English VR Serious Game",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2018/744701a946/19m3JG8atEI",
"parentPublication": {
"id": "proceedings/iiai-aai/2018/7447/0",
"title": "2018 7th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a812",
"title": "Tangiball: Foot-Enabled Embodied Tangible Interaction with a Ball in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a812/1CJczvrAl0Y",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2022/9519/0/951900a408",
"title": "Immersive Virtual Reality Environments: a proposal to enhance preservice teacher’s communicative competences",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2022/951900a408/1FUUcqD273a",
"parentPublication": {
"id": "proceedings/icalt/2022/9519/0",
"title": "2022 International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2022/9519/0/951900a152",
"title": "Examining of Learners’ Dashboard Interaction in Computer Classification Testing Environment",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2022/951900a152/1FUUfw7K8BG",
"parentPublication": {
"id": "proceedings/icalt/2022/9519/0",
"title": "2022 International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a397",
"title": "Modeling and optimizing the voice assistant behavior in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a397/1J7WhI0xeBq",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2022/5725/0/572500a180",
"title": "Behavioral Avoidance Test: Comparison between in vivo and virtual reality using questionnaires and psychophysiology",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2022/572500a180/1KmF8b3dAY0",
"parentPublication": {
"id": "proceedings/aivr/2022/5725/0",
"title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csde/2022/5305/0/10089221",
"title": "Virtual Reality Stores: Studies on Shopper Perceptions and Behaviors",
"doi": null,
"abstractUrl": "/proceedings-article/csde/2022/10089221/1M7LeF7YAco",
"parentPublication": {
"id": "proceedings/csde/2022/5305/0",
"title": "2022 IEEE Asia-Pacific Conference on Computer Science and Data Engineering (CSDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798369",
"title": "Brain Activity in Virtual Reality: Assessing Signal Quality of High-Resolution EEG While Using Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798369/1cJ18Pncw9y",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09531381",
"title": "A Survey on Affective and Cognitive VR",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09531381/1wJl1nWksQo",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
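For the ICALT 2022 record above: the abstract describes a mechanism that logs learners' interaction data in a VRLE automatically, in real time, to a locally accessible location, but names no storage format. Below is a minimal sketch of such a logger, assuming a JSON-lines file; the event names and fields are invented for illustration, and the authors' actual mechanism presumably runs inside the VR engine itself.

```python
# Minimal sketch of a real-time VRLE interaction logger: each event is
# timestamped and appended to a locally accessible JSON-lines file.
# Event names and fields below are hypothetical illustrations.
import json
import time
from pathlib import Path

LOG_FILE = Path("vrle_interactions.jsonl")  # hypothetical local log location

def log_event(event_type: str, target: str, **details) -> None:
    """Append one interaction event as a single JSON line."""
    record = {"t": time.time(), "event": event_type, "target": target, **details}
    with LOG_FILE.open("a", encoding="utf-8") as f:
        f.write(json.dumps(record) + "\n")

# Example events a VRLE might emit (names are placeholders):
log_event("gaze_enter", "molecule_model")
log_event("controller_grab", "beaker", hand="right")
log_event("teleport", "lab_station_2")
```

An append-only line-per-event format keeps writes cheap enough for real-time use and leaves feature extraction (gaze durations, action counts, session traces) to offline processing, matching the two-stage workflow the abstract outlines.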
{
"proceeding": {
"id": "1cYi06q10li",
"title": "2019 IEEE 19th International Conference on Advanced Learning Technologies (ICALT)",
"acronym": "icalt",
"groupId": "1000009",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cYi3fu89m8",
"doi": "10.1109/ICALT.2019.00107",
"title": "Assistance to Scenarisation of VR-Oriented Pedagogical Activities: Models and Tools",
"normalizedTitle": "Assistance to Scenarisation of VR-Oriented Pedagogical Activities: Models and Tools",
"abstract": "Although VRLEs (virtual reality learning environments) place the learner of a pedagogical situation in a virtual reality environment, they are dependent on a particular field or context and do not allow teachers to define or adapt their models of scenario. To help teachers in designing and generating VRLE adapted to their needs, we aim at defining a process for the design and production of VRLE and a scenario's model. We develop an editor allowing the specification of scenarios and pedagogical activities based on VR-oriented pedagogical objects.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Although VRLEs (virtual reality learning environments) place the learner of a pedagogical situation in a virtual reality environment, they are dependent on a particular field or context and do not allow teachers to define or adapt their models of scenario. To help teachers in designing and generating VRLE adapted to their needs, we aim at defining a process for the design and production of VRLE and a scenario's model. We develop an editor allowing the specification of scenarios and pedagogical activities based on VR-oriented pedagogical objects.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Although VRLEs (virtual reality learning environments) place the learner of a pedagogical situation in a virtual reality environment, they are dependent on a particular field or context and do not allow teachers to define or adapt their models of scenario. To help teachers in designing and generating VRLE adapted to their needs, we aim at defining a process for the design and production of VRLE and a scenario's model. We develop an editor allowing the specification of scenarios and pedagogical activities based on VR-oriented pedagogical objects.",
"fno": "348500a344",
"keywords": [
"Computer Aided Instruction",
"Virtual Reality",
"VRLE",
"VR Oriented Pedagogical Activities",
"VR Oriented Pedagogical Objects",
"Virtual Reality Environment",
"Pedagogical Situation",
"Virtual Reality Learning Environments",
"Solid Modeling",
"Adaptation Models",
"Virtual Environments",
"Tools",
"Prototypes",
"Education",
"VRLE",
"TEL",
"Pedagogical Activity",
"Learning Scenario"
],
"authors": [
{
"affiliation": "Le Mans Universite",
"fullName": "Oussema Mahdi",
"givenName": "Oussema",
"surname": "Mahdi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Le Mans Universite",
"fullName": "Lahcen Oubahssi",
"givenName": "Lahcen",
"surname": "Oubahssi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Le Mans Universite",
"fullName": "Claudine Piau-Toffolon",
"givenName": "Claudine",
"surname": "Piau-Toffolon",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Le Mans Universite",
"fullName": "Sébastien Iksal",
"givenName": "Sébastien",
"surname": "Iksal",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icalt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-07-01T00:00:00",
"pubType": "proceedings",
"pages": "344-346",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-3485-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "348500a337",
"articleId": "1cYi0BDk7ba",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "348500a017",
"articleId": "1cYi3oYh0RO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icalt/2015/7334/0/7334a211",
"title": "Formalization of Recurrent Uses of e-Learning Tools as Reusable Pedagogical Activities",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2015/7334a211/12OmNASraJR",
"parentPublication": {
"id": "proceedings/icalt/2015/7334/0",
"title": "2015 IEEE 15th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2014/3922/0/07044406",
"title": "Examining and mapping CS teachers' technological, pedagogical and content knowledge (TPACK) in K-12 schools",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2014/07044406/12OmNrFTr3Q",
"parentPublication": {
"id": "proceedings/fie/2014/3922/0",
"title": "2014 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2018/6049/0/604901a400",
"title": "Towards Design and Operationalization of Pedagogical Situations in the VRLEs",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2018/604901a400/12OmNxiKsau",
"parentPublication": {
"id": "proceedings/icalt/2018/6049/0",
"title": "2018 IEEE 18th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2013/5009/0/5009a304",
"title": "Patterns, Pedagogical Design Schemes and Process for Instructional Design",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2013/5009a304/12OmNywxlQZ",
"parentPublication": {
"id": "proceedings/icalt/2013/5009/0",
"title": "2013 IEEE 13th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/digitel/2007/2801/0/04148831",
"title": "Pedagogical Agents for Teacher Intervention in Educational Robotics Classes: Implementation Issues",
"doi": null,
"abstractUrl": "/proceedings-article/digitel/2007/04148831/12OmNzSQdq3",
"parentPublication": {
"id": "proceedings/digitel/2007/2801/0",
"title": "2007 IEEE International Workshop on Digital Games and Intelligent Toys-based Education",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699257",
"title": "Toward More Believable VR by Smooth Transition Between Real and Virtual Environments via Omnidirectional Video",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699257/19F1S5KRwg8",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a176",
"title": "A Dataset and Methodology for Self-Efficacy Feeling Prediction During Industry 4.0 VR Activity",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a176/1CJdghxxotO",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a682",
"title": "Geometric simplification for reducing optic flow in VR",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a682/1J7WqYsXIuA",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tq/2022/06/09580681",
"title": "Modeling and Defense of Social Virtual Reality Attacks Inducing Cybersickness",
"doi": null,
"abstractUrl": "/journal/tq/2022/06/09580681/1xPo5KfQN1K",
"parentPublication": {
"id": "trans/tq",
"title": "IEEE Transactions on Dependable and Secure Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ficloud/2021/2574/0/257400a350",
"title": "Rule-based Adaptations to Control Cybersickness in Social Virtual Reality Learning Environments",
"doi": null,
"abstractUrl": "/proceedings-article/ficloud/2021/257400a350/1yovf0AziKI",
"parentPublication": {
"id": "proceedings/ficloud/2021/2574/0",
"title": "2021 8th International Conference on Future Internet of Things and Cloud (FiCloud)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1pBMmiabSDK",
"title": "2020 IEEE 20th International Conference on Bioinformatics and Bioengineering (BIBE)",
"acronym": "bibe",
"groupId": "1000075",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1pBMo0ETfgs",
"doi": "10.1109/BIBE50027.2020.00111",
"title": "Predicting the Change in State of the Human Heart Based on Synthetic Heart Chamber Volume Data",
"normalizedTitle": "Predicting the Change in State of the Human Heart Based on Synthetic Heart Chamber Volume Data",
"abstract": "Cardiovascular Disease (CVD) is one of the most detrimental health issues the world experiences each year. Approximately 647,000 people die from CVD each year in America according to the Centers for Disease Control and Prevention. In other words, one person every 37 seconds. Globally, 17.9 million people die from CVD each year as reported by the World Health Organization. Financially, America spends approximately Z_$219 billion each year. The American Heart Association estimates the financial cost to reach $_Z1.1 trillion by 2035. These metrics show the need to continue research in aiding individuals affected with CVD. In this paper, we use the theory of our previous work, a wearable ultrasound vest to create a real-time near 3D model of the heart, to create the beginnings of a heart state prediction system. That is, we create a synthetic dataset using the ranges of normal and abnormal heart chamber volumes to calculate external surface areas and generate data via statistical bootstrapping. Then, feed this system into a Machine Learning algorithm called a Constrained State Preserved Extreme Learning Machine (CSPELM). Our results show that we can differentiate between abnormal data (Atrial Fibrillation, Chronic Mitral Regurgitation, and Post-Myocardial Infarction) from normal data, where higher percentages for abnormal and low percentages for normal is the goal, with CSPELM predictions of 60.28-88.33% to 33.61%, respectively.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Cardiovascular Disease (CVD) is one of the most detrimental health issues the world experiences each year. Approximately 647,000 people die from CVD each year in America according to the Centers for Disease Control and Prevention. In other words, one person every 37 seconds. Globally, 17.9 million people die from CVD each year as reported by the World Health Organization. Financially, America spends approximately $219 billion each year. The American Heart Association estimates the financial cost to reach $1.1 trillion by 2035. These metrics show the need to continue research in aiding individuals affected with CVD. In this paper, we use the theory of our previous work, a wearable ultrasound vest to create a real-time near 3D model of the heart, to create the beginnings of a heart state prediction system. That is, we create a synthetic dataset using the ranges of normal and abnormal heart chamber volumes to calculate external surface areas and generate data via statistical bootstrapping. Then, feed this system into a Machine Learning algorithm called a Constrained State Preserved Extreme Learning Machine (CSPELM). Our results show that we can differentiate between abnormal data (Atrial Fibrillation, Chronic Mitral Regurgitation, and Post-Myocardial Infarction) from normal data, where higher percentages for abnormal and low percentages for normal is the goal, with CSPELM predictions of 60.28-88.33% to 33.61%, respectively.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Cardiovascular Disease (CVD) is one of the most detrimental health issues the world experiences each year. Approximately 647,000 people die from CVD each year in America according to the Centers for Disease Control and Prevention. In other words, one person every 37 seconds. Globally, 17.9 million people die from CVD each year as reported by the World Health Organization. Financially, America spends approximately -1.1 trillion by 2035. These metrics show the need to continue research in aiding individuals affected with CVD. In this paper, we use the theory of our previous work, a wearable ultrasound vest to create a real-time near 3D model of the heart, to create the beginnings of a heart state prediction system. That is, we create a synthetic dataset using the ranges of normal and abnormal heart chamber volumes to calculate external surface areas and generate data via statistical bootstrapping. Then, feed this system into a Machine Learning algorithm called a Constrained State Preserved Extreme Learning Machine (CSPELM). Our results show that we can differentiate between abnormal data (Atrial Fibrillation, Chronic Mitral Regurgitation, and Post-Myocardial Infarction) from normal data, where higher percentages for abnormal and low percentages for normal is the goal, with CSPELM predictions of 60.28-88.33% to 33.61%, respectively.",
"fno": "957400a655",
"keywords": [
"Cardiology",
"Cardiovascular System",
"Diseases",
"Learning Artificial Intelligence",
"Medical Computing",
"Solid Modelling",
"Statistical Analysis",
"World Health Organization",
"American Heart Association",
"Financial Cost",
"Wearable Ultrasound Vest",
"Heart State Prediction System",
"Synthetic Dataset",
"Normal Heart Chamber Volumes",
"Abnormal Heart Chamber Volumes",
"Machine Learning Algorithm",
"Abnormal Data",
"Normal Data",
"CSPELM Predictions",
"Synthetic Heart Chamber Volume Data",
"Constrained State Preserved Extreme Learning Machine",
"Detrimental Health Issues",
"CVD",
"Cardiovascular Disease",
"Human Heart",
"Time 37 0 S",
"Heart",
"Solid Modeling",
"Ultrasonic Imaging",
"Three Dimensional Displays",
"Organizations",
"Predictive Models",
"Real Time Systems",
"Machine Learning",
"Heart",
"CSPELM",
"Heart State",
"Heart Chamber",
"Prediction",
"Cardiovascular Disease"
],
"authors": [
{
"affiliation": "Wright State University,Dayton,United States",
"fullName": "Garrett Goodman",
"givenName": "Garrett",
"surname": "Goodman",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Wright State University,CART Center,Dayton,United States",
"fullName": "Nikolaos Bourbakis",
"givenName": "Nikolaos",
"surname": "Bourbakis",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "bibe",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-10-01T00:00:00",
"pubType": "proceedings",
"pages": "655-661",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-9574-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "957400a648",
"articleId": "1pBMsJZ3nTW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "957400a662",
"articleId": "1pBMuA0pimI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmtma/2013/4932/0/4932a985",
"title": "Segmentation of Heart Sound Using Double-Threshold",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2013/4932a985/12OmNBghtsi",
"parentPublication": {
"id": "proceedings/icmtma/2013/4932/0",
"title": "2013 Fifth International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cic/1989/2114/0/00130474",
"title": "Transesophageal echo computer tomography: a new method for dynamic 3-D imaging of the heart (echo-CT)",
"doi": null,
"abstractUrl": "/proceedings-article/cic/1989/00130474/12OmNC3XhxN",
"parentPublication": {
"id": "proceedings/cic/1989/2114/0",
"title": "Proceedings Computers in Cardiology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1988/0878/0/00028470",
"title": "3-D heart image reconstructed from MRI data",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1988/00028470/12OmNzsrwcX",
"parentPublication": {
"id": "proceedings/icpr/1988/0878/0",
"title": "9th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccs/2021/9445/0/944500a228",
"title": "Heart Disease Detection System Using Gradient Boosting Technique",
"doi": null,
"abstractUrl": "/proceedings-article/iccs/2021/944500a228/1DSyCtErtkI",
"parentPublication": {
"id": "proceedings/iccs/2021/9445/0",
"title": "2021 International Conference on Computing Sciences (ICCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccs/2021/9445/0/944500a212",
"title": "Machine Learning-Based Heart Patient Scanning, Visualization, and Monitoring",
"doi": null,
"abstractUrl": "/proceedings-article/iccs/2021/944500a212/1DSyzDIy0Tu",
"parentPublication": {
"id": "proceedings/iccs/2021/9445/0",
"title": "2021 International Conference on Computing Sciences (ICCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2019/0858/0/09005488",
"title": "Towards comparing and using Machine Learning techniques for detecting and predicting Heart Attack and Diseases",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2019/09005488/1hJsaaRMGlO",
"parentPublication": {
"id": "proceedings/big-data/2019/0858/0",
"title": "2019 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2019/5584/0/558400a910",
"title": "Diagnosing Heart Disease Types from Chest X-Rays Using a Deep Learning Approach",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2019/558400a910/1jdE09LwR1u",
"parentPublication": {
"id": "proceedings/csci/2019/5584/0",
"title": "2019 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2020/6251/0/09378460",
"title": "Development of an Explainable Prediction Model of Heart Failure Survival by Using Ensemble Trees",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2020/09378460/1s64Rvyie3u",
"parentPublication": {
"id": "proceedings/big-data/2020/6251/0",
"title": "2020 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itca/2020/0378/0/037800a630",
"title": "Heart disease prediction based on random forest and LSTM",
"doi": null,
"abstractUrl": "/proceedings-article/itca/2020/037800a630/1tpBg5iPcL6",
"parentPublication": {
"id": "proceedings/itca/2020/0378/0",
"title": "2020 2nd International Conference on Information Technology and Computer Application (ITCA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icceai/2021/3960/0/396000a355",
"title": "Multi-label classification of heart sound signals",
"doi": null,
"abstractUrl": "/proceedings-article/icceai/2021/396000a355/1xqyRkAdqJq",
"parentPublication": {
"id": "proceedings/icceai/2021/3960/0",
"title": "2021 International Conference on Computer Engineering and Artificial Intelligence (ICCEAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
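For the BIBE record above: the abstract says the synthetic dataset was produced from ranges of normal and abnormal heart chamber volumes via statistical bootstrapping. The NumPy sketch below illustrates that generation step only; the volume ranges are placeholders rather than the paper's clinical values, and the CSPELM classifier itself is not reproduced.

```python
# Minimal sketch of generating synthetic chamber-volume data by statistical
# bootstrapping: draw a base sample from a stated range, then resample it
# with replacement. The ranges below are illustrative placeholders.
import numpy as np

rng = np.random.default_rng(seed=0)

def bootstrap_volumes(low_ml: float, high_ml: float,
                      n_base: int = 50, n_boot: int = 1000) -> np.ndarray:
    """Bootstrap distribution of the mean volume over [low_ml, high_ml]."""
    base = rng.uniform(low_ml, high_ml, size=n_base)   # base sample in mL
    idx = rng.integers(0, n_base, size=(n_boot, n_base))
    return base[idx].mean(axis=1)                      # one mean per resample

normal_lv = bootstrap_volumes(50.0, 100.0)     # placeholder "normal" range
abnormal_lv = bootstrap_volumes(100.0, 180.0)  # placeholder "abnormal" range
print(f"normal {normal_lv.mean():.1f} mL, abnormal {abnormal_lv.mean():.1f} mL")
```

Resampling with replacement from a small base sample yields an arbitrarily large synthetic set whose spread reflects the stated physiological range, which is the property the abstract relies on before feeding the data to the learning machine.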
{
"proceeding": {
"id": "19F1LC52tjO",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "19F1SZ9ch0I",
"doi": "10.1109/ISMAR-Adjunct.2018.00057",
"title": "Is That Me?—Embodiment and Body Perception with an Augmented Reality Mirror",
"normalizedTitle": "Is That Me?—Embodiment and Body Perception with an Augmented Reality Mirror",
"abstract": "Virtual reality has been used intensively to study embodiment and body perception, in particular for research purposes in psychological domains. Virtual avatars are used to resemble users' appearance and to implement interactively simulated behaviour. To make this a realistic and believable experience users should feel embodiment, i.e. ownership, agency, and self-location/presence. State-of-the-art capture and display technologies allow for extending virtual reality embodiment to the realm of augmented reality for higher efficacy-instead of seeing a virtual reality body one would see a captured, 3D representation of their own body naturally controlled by their real body movements within the context of the present real environment. However, it is unclear whether users would experience embodiment with their augmented reality avatar and whether findings from virtual reality targeting body perception can be replicated. Here we present an augmented reality system comprising a 3D point cloud capturing system (Microsoft Kinect) and an optical see-through head-mounted display (Microsoft HoloLens), both connected to a purpose-developed application displaying a user's body in a virtual 3D mirror embedded into the real environment. In a study with 24 participants, we evaluated embodiment and body weight perception as a proof of concept. This is based on a similar study conducted in Virtual Reality. Our findings show that users experience ownership and agency with the mirrored body and that body weight perception in virtual and augmented reality systems is similar.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual reality has been used intensively to study embodiment and body perception, in particular for research purposes in psychological domains. Virtual avatars are used to resemble users' appearance and to implement interactively simulated behaviour. To make this a realistic and believable experience users should feel embodiment, i.e. ownership, agency, and self-location/presence. State-of-the-art capture and display technologies allow for extending virtual reality embodiment to the realm of augmented reality for higher efficacy-instead of seeing a virtual reality body one would see a captured, 3D representation of their own body naturally controlled by their real body movements within the context of the present real environment. However, it is unclear whether users would experience embodiment with their augmented reality avatar and whether findings from virtual reality targeting body perception can be replicated. Here we present an augmented reality system comprising a 3D point cloud capturing system (Microsoft Kinect) and an optical see-through head-mounted display (Microsoft HoloLens), both connected to a purpose-developed application displaying a user's body in a virtual 3D mirror embedded into the real environment. In a study with 24 participants, we evaluated embodiment and body weight perception as a proof of concept. This is based on a similar study conducted in Virtual Reality. Our findings show that users experience ownership and agency with the mirrored body and that body weight perception in virtual and augmented reality systems is similar.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual reality has been used intensively to study embodiment and body perception, in particular for research purposes in psychological domains. Virtual avatars are used to resemble users' appearance and to implement interactively simulated behaviour. To make this a realistic and believable experience users should feel embodiment, i.e. ownership, agency, and self-location/presence. State-of-the-art capture and display technologies allow for extending virtual reality embodiment to the realm of augmented reality for higher efficacy-instead of seeing a virtual reality body one would see a captured, 3D representation of their own body naturally controlled by their real body movements within the context of the present real environment. However, it is unclear whether users would experience embodiment with their augmented reality avatar and whether findings from virtual reality targeting body perception can be replicated. Here we present an augmented reality system comprising a 3D point cloud capturing system (Microsoft Kinect) and an optical see-through head-mounted display (Microsoft HoloLens), both connected to a purpose-developed application displaying a user's body in a virtual 3D mirror embedded into the real environment. In a study with 24 participants, we evaluated embodiment and body weight perception as a proof of concept. This is based on a similar study conducted in Virtual Reality. Our findings show that users experience ownership and agency with the mirrored body and that body weight perception in virtual and augmented reality systems is similar.",
"fno": "08699238",
"keywords": [
"Augmented Reality",
"Avatars",
"Helmet Mounted Displays",
"Psychology",
"Virtual Reality Body",
"Head Mounted Display",
"3 D Point Cloud Capturing System",
"Virtual Avatars",
"Augmented Reality System",
"Virtual Reality Embodiment",
"Display Technologies",
"Realistic Experience Users",
"Interactively Simulated Behaviour",
"Body Weight Perception",
"Virtual 3 D Mirror",
"Augmented Reality Avatar",
"Body Movements",
"3 D Representation",
"Augmented Reality",
"Ownership",
"Agency",
"Presence",
"Mixed Reality",
"Optical See Through Displays",
"Self Location",
"H 5 1 Information Interfaces And Presentation E G HCI Multimedia Information Systems X 2014 Artificial Augmented And Virtual Realities"
],
"authors": [
{
"affiliation": "University of Otago",
"fullName": "Chontira Nimcharoen",
"givenName": "Chontira",
"surname": "Nimcharoen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Otago",
"fullName": "Stefanie Zollmann",
"givenName": "Stefanie",
"surname": "Zollmann",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Otago",
"fullName": "Jonny Collins",
"givenName": "Jonny",
"surname": "Collins",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Otago",
"fullName": "Holger Regenbrecht",
"givenName": "Holger",
"surname": "Regenbrecht",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-10-01T00:00:00",
"pubType": "proceedings",
"pages": "158-163",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-7592-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08699316",
"articleId": "19F1UA1hw40",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08699254",
"articleId": "19F1NG6YVO0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ct/1997/8084/0/80840012",
"title": "The Cyborg's Dilemma: Embodiment in Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/ct/1997/80840012/12OmNx38vVh",
"parentPublication": {
"id": "proceedings/ct/1997/8084/0",
"title": "Cognitive Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a350",
"title": "Exploring Presence, Avatar Embodiment, and Body Perception with a Holographic Augmented Reality Mirror",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a350/1CJcn3q3J5K",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a730",
"title": "Third-Person Perspective Avatar Embodiment in Augmented Reality: Examining the Proteus Effect on Physical Performance",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a730/1CJffY1QgeI",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a260",
"title": "The Effects of Avatar and Environment Design on Embodiment, Presence, Activation, and Task Load in a Virtual Reality Exercise Application",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a260/1JrRf0Dbcac",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049718",
"title": "Body and Time: Virtual Embodiment and its Effect on Time Perception",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049718/1KYom5qAQo0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2019/5434/0/543400a001",
"title": "Providing Sense of Embodiment with Mobile Virtual Reality Devices: A Case Study using the "Lamp-Head's Laboratory" Animation",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2019/543400a001/1fHjwG6Gnug",
"parentPublication": {
"id": "proceedings/svr/2019/5434/0",
"title": "2019 21st Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089604",
"title": "Examining Whether Secondary Effects of Temperature-Associated Virtual Stimuli Influence Subjective Perception of Duration",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089604/1jIxg8GOOUo",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a462",
"title": "Body Weight Perception of Females using Photorealistic Avatars in Virtual and Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a462/1pysu9tPcGc",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a065",
"title": "The Embodiment of Photorealistic Avatars Influences Female Body Weight Perception in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a065/1tuAAOZpdoQ",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09495125",
"title": "Being an Avatar “for Real”: A Survey on Virtual Embodiment in Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09495125/1vyju4jl6AE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1pystLSz19C",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1pysu9tPcGc",
"doi": "10.1109/ISMAR50242.2020.00071",
"title": "Body Weight Perception of Females using Photorealistic Avatars in Virtual and Augmented Reality",
"normalizedTitle": "Body Weight Perception of Females using Photorealistic Avatars in Virtual and Augmented Reality",
"abstract": "The appearance of avatars can potentially alter changes in their users’ perception and behavior. Based on this finding, approaches to support the therapy of body perception disturbances in eating or body weight disorders by mixed reality (MR) systems gain in importance. However, the methodological heterogeneity of previous research has made it difficult to assess the suitability of different MR systems for therapeutic use in these areas. The effects of MR system properties and related psychometric factors on body-related perceptions have so far remained unclear. We developed an interactive virtual mirror embodiment application to investigate the differences between an augmented reality see-through head-mounted-display (HMD) and a virtual reality HMD on the before-mentioned factors. Additionally, we considered the influence of the participant’s body-mass-index (BMI) and the BMI difference between participants and their avatars on the estimations. The 54 normal-weight female participants significantly underestimated the weight of their photorealistic, generic avatar in both conditions. Body weight estimations were significantly predicted by the participants’ BMI and the BMI difference. We also observed partially significant differences in presence and tendencies for differences in virtual body ownership between the systems. Our results offer new insights into the relationships of body weight perception in different MR environments and provide new perspectives for the development of therapeutic applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The appearance of avatars can potentially alter changes in their users’ perception and behavior. Based on this finding, approaches to support the therapy of body perception disturbances in eating or body weight disorders by mixed reality (MR) systems gain in importance. However, the methodological heterogeneity of previous research has made it difficult to assess the suitability of different MR systems for therapeutic use in these areas. The effects of MR system properties and related psychometric factors on body-related perceptions have so far remained unclear. We developed an interactive virtual mirror embodiment application to investigate the differences between an augmented reality see-through head-mounted-display (HMD) and a virtual reality HMD on the before-mentioned factors. Additionally, we considered the influence of the participant’s body-mass-index (BMI) and the BMI difference between participants and their avatars on the estimations. The 54 normal-weight female participants significantly underestimated the weight of their photorealistic, generic avatar in both conditions. Body weight estimations were significantly predicted by the participants’ BMI and the BMI difference. We also observed partially significant differences in presence and tendencies for differences in virtual body ownership between the systems. Our results offer new insights into the relationships of body weight perception in different MR environments and provide new perspectives for the development of therapeutic applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The appearance of avatars can potentially alter changes in their users’ perception and behavior. Based on this finding, approaches to support the therapy of body perception disturbances in eating or body weight disorders by mixed reality (MR) systems gain in importance. However, the methodological heterogeneity of previous research has made it difficult to assess the suitability of different MR systems for therapeutic use in these areas. The effects of MR system properties and related psychometric factors on body-related perceptions have so far remained unclear. We developed an interactive virtual mirror embodiment application to investigate the differences between an augmented reality see-through head-mounted-display (HMD) and a virtual reality HMD on the before-mentioned factors. Additionally, we considered the influence of the participant’s body-mass-index (BMI) and the BMI difference between participants and their avatars on the estimations. The 54 normal-weight female participants significantly underestimated the weight of their photorealistic, generic avatar in both conditions. Body weight estimations were significantly predicted by the participants’ BMI and the BMI difference. We also observed partially significant differences in presence and tendencies for differences in virtual body ownership between the systems. Our results offer new insights into the relationships of body weight perception in different MR environments and provide new perspectives for the development of therapeutic applications.",
"fno": "850800a462",
"keywords": [
"Augmented Reality",
"Avatars",
"Biomechanics",
"Human Computer Interaction",
"Medical Computing",
"Psychometric Testing",
"Mixed Reality Systems",
"Augmented Reality",
"Body Weight Perception",
"Photorealistic Avatars",
"Body Perception Disturbances",
"Eating Disorders",
"MR System Properties",
"Body Related Perceptions",
"Interactive Virtual Mirror Embodiment Application",
"Virtual Reality HMD",
"BMI Difference",
"Generic Avatar",
"Body Weight Estimations",
"Virtual Body Ownership",
"Normal Weight Female Participants",
"Human Centered Computing",
"Human Computer Interaction",
"Avatars",
"Medical Treatment",
"Estimation",
"Resists",
"Mirrors",
"Indexes",
"Augmented Reality",
"Mixed Reality",
"Immersion",
"Presence",
"Embodiment",
"Virtual Body Ownership",
"Agency",
"Body Image",
"Eating Disorders",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Empirical Studies In HCI",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Mixed Augmented Reality",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Virtual Reality"
],
"authors": [
{
"affiliation": "University of Würzburg,HCI Group",
"fullName": "Erik Wolf",
"givenName": "Erik",
"surname": "Wolf",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Würzburg,HTS Group",
"fullName": "Nina Döllinger",
"givenName": "Nina",
"surname": "Döllinger",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Würzburg,HCI Group",
"fullName": "David Mal",
"givenName": "David",
"surname": "Mal",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Würzburg,HTS Group",
"fullName": "Carolin Wienrich",
"givenName": "Carolin",
"surname": "Wienrich",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "TU Dortmund University,Computer Graphics Group",
"fullName": "Mario Botsch",
"givenName": "Mario",
"surname": "Botsch",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Würzburg,HCI Group",
"fullName": "Marc Erich Latoschik",
"givenName": "Marc Erich",
"surname": "Latoschik",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "462-473",
"year": "2020",
"issn": "1554-7868",
"isbn": "978-1-7281-8508-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "850800a452",
"articleId": "1pysvNRUnD2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "850800a474",
"articleId": "1pysuR65ESQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "trans/tg/2014/04/ttg201404636",
"title": "Getting the Point Across: Exploring the Effects of Dynamic Virtual Humans in an Interactive Museum Exhibit on User Perceptions",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg201404636/13rRUxAASTc",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/04/ttg201404654",
"title": "Toward \"Pseudo-Haptic Avatars\": Modifying the Visual Animation of Self-Avatar Can Simulate the Perception of Weight Lifting",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg201404654/13rRUyft7D4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/11/08466636",
"title": "Superman vs Giant: A Study on Spatial Perception for a Multi-Scale Mixed Reality Flying Telepresence Interface",
"doi": null,
"abstractUrl": "/journal/tg/2018/11/08466636/14M3DZXcLXa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08546159",
"title": "Show me your face and I will tell you your height, weight and body mass index",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08546159/17D45Wc1IJq",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2018/7315/0/731500a229",
"title": "REVAM: A Virtual Reality Application for Inducing Body Size Perception Modifications",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2018/731500a229/17D45XeKgnO",
"parentPublication": {
"id": "proceedings/cw/2018/7315/0",
"title": "2018 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699238",
"title": "Is That Me?—Embodiment and Body Perception with an Augmented Reality Mirror",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699238/19F1SZ9ch0I",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049718",
"title": "Body and Time: Virtual Embodiment and its Effect on Time Perception",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049718/1KYom5qAQo0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ic3/2019/3591/0/08844872",
"title": "A novel method to estimate Height, Weight and Body Mass Index from face images",
"doi": null,
"abstractUrl": "/proceedings-article/ic3/2019/08844872/1dx8p5CUDSg",
"parentPublication": {
"id": "proceedings/ic3/2019/3591/0",
"title": "2019 Twelfth International Conference on Contemporary Computing (IC3)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2020/7463/0/746300a060",
"title": "Photorealistic avatars to enhance the efficacy of Selfattachment psychotherapy",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2020/746300a060/1qpzCwDcDKM",
"parentPublication": {
"id": "proceedings/aivr/2020/7463/0",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a065",
"title": "The Embodiment of Photorealistic Avatars Influences Female Body Weight Perception in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a065/1tuAAOZpdoQ",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "17D45VtKisA",
"title": "2018 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"acronym": "aivr",
"groupId": "1830004",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45WODasM",
"doi": "10.1109/AIVR.2018.00061",
"title": "Omni-Learning XR Technologies and Visitor-Centered Experience in the Smart Art Museum",
"normalizedTitle": "Omni-Learning XR Technologies and Visitor-Centered Experience in the Smart Art Museum",
"abstract": "The exhibition \"Yo̅ga Modern Western Paintings of Japan\" at Museum of National Taipei University of Education (MoNTUE) brings the omni-learning XR technologies and builds a visitor-centered environment in the smart art museum. It takes \"light\" as the thematic context of both the exhibition content and the technology adaption. The omni-learning XR technologies, including Optical Camera Communication (OCC) & Augmented Reality (AR), are applied in this exhibition to inspire the desire of exploration of all ages. As a result, this exhibition achieves the mobile, personal and interactive museum experience in a smart art museum without sacrificing the precious moments of encountering the authentic artworks.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The exhibition \"Yo̅ga Modern Western Paintings of Japan\" at Museum of National Taipei University of Education (MoNTUE) brings the omni-learning XR technologies and builds a visitor-centered environment in the smart art museum. It takes \"light\" as the thematic context of both the exhibition content and the technology adaption. The omni-learning XR technologies, including Optical Camera Communication (OCC) & Augmented Reality (AR), are applied in this exhibition to inspire the desire of exploration of all ages. As a result, this exhibition achieves the mobile, personal and interactive museum experience in a smart art museum without sacrificing the precious moments of encountering the authentic artworks.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The exhibition \"Yo̅ga Modern Western Paintings of Japan\" at Museum of National Taipei University of Education (MoNTUE) brings the omni-learning XR technologies and builds a visitor-centered environment in the smart art museum. It takes \"light\" as the thematic context of both the exhibition content and the technology adaption. The omni-learning XR technologies, including Optical Camera Communication (OCC) & Augmented Reality (AR), are applied in this exhibition to inspire the desire of exploration of all ages. As a result, this exhibition achieves the mobile, personal and interactive museum experience in a smart art museum without sacrificing the precious moments of encountering the authentic artworks.",
"fno": "926900a258",
"keywords": [
"Art",
"Augmented Reality",
"Cameras",
"Exhibitions",
"Museums",
"User Interfaces",
"Interactive Museum Experience",
"Personal Museum Experience",
"Mobile Museum Experience",
"Technology Adaption",
"Visitor Centered Environment",
"Exhibition",
"Smart Art Museum",
"Visitor Centered Experience",
"Omni Learning XR Technologies",
"Painting",
"Art",
"Education",
"History",
"Media",
"Smart Devices",
"X Reality",
"Learning Technology Augmented Reality Optical Camera Communication Smart Musuem"
],
"authors": [
{
"affiliation": null,
"fullName": "Tsang-Gang Lin",
"givenName": "Tsang-Gang",
"surname": "Lin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hsiang-Lan Shih",
"givenName": "Hsiang-Lan",
"surname": "Shih",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chun-Ting Lee",
"givenName": "Chun-Ting",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hui-Yu Hsieh",
"givenName": "Hui-Yu",
"surname": "Hsieh",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yi-Yuan Chen",
"givenName": "Yi-Yuan",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chien-Kuo Liu",
"givenName": "Chien-Kuo",
"surname": "Liu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "aivr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-12-01T00:00:00",
"pubType": "proceedings",
"pages": "258-261",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-9269-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "926900a256",
"articleId": "17D45X0yjT9",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "926900a262",
"articleId": "17D45WK5Asq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/Ismar-mashd/2014/6887/0/06935432",
"title": "Effects of mobile AR-enabled interactions on retention and transfer for learning in art museum contexts",
"doi": null,
"abstractUrl": "/proceedings-article/Ismar-mashd/2014/06935432/12OmNBKmXkv",
"parentPublication": {
"id": "proceedings/Ismar-mashd/2014/6887/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality - Media, Art, Social Science, Humanities and Design (ISMAR-MASH'D)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-amh/2010/9339/0/05643290",
"title": "A day at the museum: An augmented fine-art exhibit",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-amh/2010/05643290/12OmNvStcA2",
"parentPublication": {
"id": "proceedings/ismar-amh/2010/9339/0",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality - Arts, Media, and Humanities",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2016/2722/0/07590376",
"title": "Virtual Museum: Playful Visitor Experience in the Real and Virtual World",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2016/07590376/12OmNvq5jys",
"parentPublication": {
"id": "proceedings/vs-games/2016/2722/0",
"title": "2016 8th International Conference on Games and Virtual Worlds for Serious Applications (VS-Games)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmu/2015/2612/0/07061046",
"title": "Proposal of appreciation support system to reflect the opinion of visitor about art objects in art museum",
"doi": null,
"abstractUrl": "/proceedings-article/icmu/2015/07061046/12OmNyuPLpp",
"parentPublication": {
"id": "proceedings/icmu/2015/2612/0",
"title": "2015 Eighth International Conference on Mobile Computing and Ubiquitous Networking (ICMU)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sorucom/2014/1799/0/1799a147",
"title": "On the Way to a New Exhibition on the History of Computing at the Polytechnic Museum",
"doi": null,
"abstractUrl": "/proceedings-article/sorucom/2014/1799a147/12OmNzxyiJB",
"parentPublication": {
"id": "proceedings/sorucom/2014/1799/0",
"title": "2014 Third International Conference on Computer Technology in Russia and in the Former Soviet Union (SoRuCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446581",
"title": "VR Touch Museum",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446581/13bd1fKQxrI",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/it/2011/02/mit2011020025",
"title": "RFID-Based Guide Gives Museum Visitors More Freedom",
"doi": null,
"abstractUrl": "/magazine/it/2011/02/mit2011020025/13rRUwjGoCB",
"parentPublication": {
"id": "mags/it",
"title": "IT Professional",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cisai/2021/0692/0/069200b021",
"title": "Design of 3D Exhibition Hall System of Art Museum Based On Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/cisai/2021/069200b021/1BmO4qNyqR2",
"parentPublication": {
"id": "proceedings/cisai/2021/0692/0",
"title": "2021 International Conference on Computer Information Science and Artificial Intelligence (CISAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2019/9226/0/922600a267",
"title": "Smart Survey Tool: A Multi Device Platform for Museum Visitor Tracking and Tracking Data Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2019/922600a267/1cMF7ECItdC",
"parentPublication": {
"id": "proceedings/pacificvis/2019/9226/0",
"title": "2019 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cyberc/2020/8448/0/844800a124",
"title": "A VR/AR-based Display System for Arts and Crafts Museum",
"doi": null,
"abstractUrl": "/proceedings-article/cyberc/2020/844800a124/1qJuehL8NG0",
"parentPublication": {
"id": "proceedings/cyberc/2020/8448/0",
"title": "2020 International Conference on Cyber-Enabled Distributed Computing and Knowledge Discovery (CyberC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1CJcAaH6aYg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJefXNbhYs",
"doi": "10.1109/VRW55335.2022.00337",
"title": "Aroaro - A Tool for Distributed Immersive Mixed Reality Visualization",
"normalizedTitle": "Aroaro - A Tool for Distributed Immersive Mixed Reality Visualization",
"abstract": "In this research demo we present three immersive scenarios on three XR modalities - VR, immersive AR on HoloLens and 2D AR on Android. These scenarios are a network of Harry Potter characters in VR, a map-based visualization of a soldier's history with rich attributes including images and sound on HoloLens, and a visualization of car racing on Android. These visualizations have been created with Aroaro our distributed mixed reality data visualization tool.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this research demo we present three immersive scenarios on three XR modalities - VR, immersive AR on HoloLens and 2D AR on Android. These scenarios are a network of Harry Potter characters in VR, a map-based visualization of a soldier's history with rich attributes including images and sound on HoloLens, and a visualization of car racing on Android. These visualizations have been created with Aroaro our distributed mixed reality data visualization tool.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this research demo we present three immersive scenarios on three XR modalities - VR, immersive AR on HoloLens and 2D AR on Android. These scenarios are a network of Harry Potter characters in VR, a map-based visualization of a soldier's history with rich attributes including images and sound on HoloLens, and a visualization of car racing on Android. These visualizations have been created with Aroaro our distributed mixed reality data visualization tool.",
"fno": "840200a972",
"keywords": [
"Android Operating System",
"Augmented Reality",
"Data Visualisation",
"Distributed Immersive Mixed Reality Visualization",
"Holo Lens",
"Android",
"Harry Potter Characters",
"Map Based Visualization",
"Rich Attributes",
"XR Modalities",
"Soldiers History",
"Distributed Mixed Reality Data Visualization Tool",
"Aroaro",
"Three Dimensional Displays",
"Conferences",
"Data Visualization",
"Mixed Reality",
"History",
"Automobiles",
"X Reality",
"Augmented Reality",
"Virtual Reality",
"Mixed Reality",
"Data Visualization",
"Immersive Analytics",
"Multi User",
"Networks",
"Human Centered Computing",
"Visualization",
"Visualization Application Domains",
"Visual Analytics",
"Human Computer Interaction HCI",
"Interaction Paradigms",
"Mixed Augmented Reality",
"Human Computer Interaction"
],
"authors": [
{
"affiliation": "The University of Auckland",
"fullName": "Fernando Beltrán",
"givenName": "Fernando",
"surname": "Beltrán",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Auckland",
"fullName": "David White",
"givenName": "David",
"surname": "White",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Auckland",
"fullName": "Jing Geng",
"givenName": "Jing",
"surname": "Geng",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "972-973",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8402-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1CJefT5nEg8",
"name": "pvrw202284020-09757464s1-mm_840200a972.zip",
"size": "4.65 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvrw202284020-09757464s1-mm_840200a972.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "840200a970",
"articleId": "1CJedNNnv0c",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "840200a974",
"articleId": "1CJcAGzhwxq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvvrhc/1998/8283/0/82830078",
"title": "Vision and Graphics in Producing Mixed Reality Worlds",
"doi": null,
"abstractUrl": "/proceedings-article/cvvrhc/1998/82830078/12OmNylbov1",
"parentPublication": {
"id": "proceedings/cvvrhc/1998/8283/0",
"title": "Computer Vision for Virtual Reality Based Human Communications, Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2019/03/08698351",
"title": "Immersive Analytics",
"doi": null,
"abstractUrl": "/magazine/cg/2019/03/08698351/19utOsQX9Nm",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a940",
"title": "[DC] Improving Multi-User Interaction for Mixed Reality Telecollaboration",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a940/1CJelpv0Txm",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a472",
"title": "Situated Visualization of IIoT Data on the Hololens 2",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a472/1CJend8tNew",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09773967",
"title": "A Review of Interaction Techniques for Immersive Environments",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09773967/1DjDoKqOJz2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a124",
"title": "X-Space: A Tool for Extending Mixed Reality Space from Web2D Visualization Anywhere",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a124/1J7W7m1yZgI",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a865",
"title": "Learning and Teaching Fluid Dynamics using Augmented and Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a865/1J7Wr5spc76",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a923",
"title": "Cross Reality Authoring: A Mixed Reality Editor approach",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a923/1J7WtZdBjig",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049710",
"title": "Exploring Plausibility and Presence in Mixed Reality Experiences",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049710/1KYoplRZLWM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798101",
"title": "Mixed Reality in Art Education",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798101/1cJ0RtUtRgk",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1J7W6LmbCw0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "9973799",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1J7WgdWP768",
"doi": "10.1109/ISMAR-Adjunct57072.2022.00148",
"title": "Ex-Cit XR: Expert-elicitation of XR Techniques for Disengaging from IVEs",
"normalizedTitle": "Ex-Cit XR: Expert-elicitation of XR Techniques for Disengaging from IVEs",
"abstract": "This research explores visualisation and interaction techniques to help disengage users from immersive virtual environments (IVEs) and transition them back to the real world. To gain such insight, we invited eleven Extended Reality (XR) experts to participate in an elicitation study to design the techniques for disengagement. We elicited a total of 132 techniques for four different scenarios of IVEs and classified them into six categories. This led us to discover key design patterns in disengagement strategies. Finally, we summarise key findings and illustrate how the disengagement techniques can be strategically escalated in the exemplary use cases of Ex-Cit XR.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This research explores visualisation and interaction techniques to help disengage users from immersive virtual environments (IVEs) and transition them back to the real world. To gain such insight, we invited eleven Extended Reality (XR) experts to participate in an elicitation study to design the techniques for disengagement. We elicited a total of 132 techniques for four different scenarios of IVEs and classified them into six categories. This led us to discover key design patterns in disengagement strategies. Finally, we summarise key findings and illustrate how the disengagement techniques can be strategically escalated in the exemplary use cases of Ex-Cit XR.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This research explores visualisation and interaction techniques to help disengage users from immersive virtual environments (IVEs) and transition them back to the real world. To gain such insight, we invited eleven Extended Reality (XR) experts to participate in an elicitation study to design the techniques for disengagement. We elicited a total of 132 techniques for four different scenarios of IVEs and classified them into six categories. This led us to discover key design patterns in disengagement strategies. Finally, we summarise key findings and illustrate how the disengagement techniques can be strategically escalated in the exemplary use cases of Ex-Cit XR.",
"fno": "536500a710",
"keywords": [
"Object Oriented Methods",
"User Interfaces",
"Virtual Reality",
"Design Patterns",
"Disengagement Strategies",
"Disengagement Techniques",
"Elicitation Study",
"Ex Cit XR",
"Expert Elicitation",
"Extended Reality Experts",
"Immersive Virtual Environments",
"Interaction Techniques",
"IVE",
"XR Techniques",
"Visualization",
"Extended Reality",
"Virtual Environments",
"X Reality",
"Human Centered Computing Human Computer Interaction HCI Interaction Paradigms Mixed Augmented Reality",
"Human Centered Computing Human Computer Interaction HCI Interaction Paradigms Virtual Reality"
],
"authors": [
{
"affiliation": "University of Canterbury,New Zealand",
"fullName": "Thammathip Piumsomboon",
"givenName": "Thammathip",
"surname": "Piumsomboon",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Canterbury,New Zealand",
"fullName": "Gavin Ong",
"givenName": "Gavin",
"surname": "Ong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Canterbury,New Zealand",
"fullName": "Cameron Urban",
"givenName": "Cameron",
"surname": "Urban",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Monash University,Australia",
"fullName": "Barrett Ens",
"givenName": "Barrett",
"surname": "Ens",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Northwestern Polytechnical university,China",
"fullName": "Xiaoliang Bai",
"givenName": "Xiaoliang",
"surname": "Bai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Canterbury,New Zealand",
"fullName": "Simon Hoermann",
"givenName": "Simon",
"surname": "Hoermann",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-10-01T00:00:00",
"pubType": "proceedings",
"pages": "710-711",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-5365-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "536500a704",
"articleId": "1J7W8wUdqTK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "536500a712",
"articleId": "1J7WwQlA4Gk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ucc-companion/2018/0359/0/035900a353",
"title": "A Review of Applications of Extended Reality in the Construction Domain",
"doi": null,
"abstractUrl": "/proceedings-article/ucc-companion/2018/035900a353/17D45WB0qbv",
"parentPublication": {
"id": "proceedings/ucc-companion/2018/0359/0",
"title": "2018 IEEE/ACM International Conference on Utility and Cloud Computing Companion (UCC Companion)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sec/2021/8390/0/839000a465",
"title": "Poster: Enabling Flexible Edge-assisted XR",
"doi": null,
"abstractUrl": "/proceedings-article/sec/2021/839000a465/1B2HcrybLCE",
"parentPublication": {
"id": "proceedings/sec/2021/8390/0",
"title": "2021 IEEE/ACM Symposium on Edge Computing (SEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/apsec/2021/3784/0/378400a572",
"title": "Topic Trends in Issue Tracking System of Extended Reality Frameworks",
"doi": null,
"abstractUrl": "/proceedings-article/apsec/2021/378400a572/1B4m7XRlTcQ",
"parentPublication": {
"id": "proceedings/apsec/2021/3784/0",
"title": "2021 28th Asia-Pacific Software Engineering Conference (APSEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a322",
"title": "Extended Reality Training for Business and Education: The New Generation of Learning Experiences",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a322/1J7W77jxOlq",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a254",
"title": "Generative Research in the Context of Academic Extended Reality Research",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a254/1J7WcCweXhC",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a167",
"title": "Flexible XR Prototyping – A Sports Spectating Example",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a167/1J7WuYXm6kg",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mass/2022/7180/0/718000a131",
"title": "Emulating Your eXtended World: An Emulation Environment for XR App Development",
"doi": null,
"abstractUrl": "/proceedings-article/mass/2022/718000a131/1JeEl77BoRy",
"parentPublication": {
"id": "proceedings/mass/2022/7180/0",
"title": "2022 IEEE 19th International Conference on Mobile Ad Hoc and Smart Systems (MASS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2023/4839/0/483900a067",
"title": "IEEE VR 2023 Workshop: Datasets for developing intelligent XR applications (DATA4XR)",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2023/483900a067/1N0wLk9I85W",
"parentPublication": {
"id": "proceedings/vrw/2023/4839/null",
"title": "2023 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a057",
"title": "Extended by Design: A Toolkit for Creation of XR Experiences",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a057/1pBMiZ4INTG",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a439",
"title": "XR Mobility Platform: Multi-Modal XR System Mounted on Autonomous Vehicle for Passenger’s Comfort Improvement",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a439/1yeQPu8aFlm",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1J7W6LmbCw0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "9973799",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1J7WuYXm6kg",
"doi": "10.1109/ISMAR-Adjunct57072.2022.00038",
"title": "Flexible XR Prototyping – A Sports Spectating Example",
"normalizedTitle": "Flexible XR Prototyping – A Sports Spectating Example",
"abstract": "Extended Reality (XR) prototyping is a useful way that has the potential to assist the AR application development process. It allows for off-site development and evaluation in cases where on-site access is challenging or even impossible. In this work, we summarize our Flexible XR Prototyping framework, showing the different phases and considerations needed for an improved and more effortless XR prototyping experience. We then show how this can be used for the example use case of AR sports spectating in a stadium and provide some examples of the different prototypes developed for an on-site AR sports spectating application. Our goal is to share our own experience in AR prototyping and to spark discussion on the XR prototyping process.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Extended Reality (XR) prototyping is a useful way that has the potential to assist the AR application development process. It allows for off-site development and evaluation in cases where on-site access is challenging or even impossible. In this work, we summarize our Flexible XR Prototyping framework, showing the different phases and considerations needed for an improved and more effortless XR prototyping experience. We then show how this can be used for the example use case of AR sports spectating in a stadium and provide some examples of the different prototypes developed for an on-site AR sports spectating application. Our goal is to share our own experience in AR prototyping and to spark discussion on the XR prototyping process.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Extended Reality (XR) prototyping is a useful way that has the potential to assist the AR application development process. It allows for off-site development and evaluation in cases where on-site access is challenging or even impossible. In this work, we summarize our Flexible XR Prototyping framework, showing the different phases and considerations needed for an improved and more effortless XR prototyping experience. We then show how this can be used for the example use case of AR sports spectating in a stadium and provide some examples of the different prototypes developed for an on-site AR sports spectating application. Our goal is to share our own experience in AR prototyping and to spark discussion on the XR prototyping process.",
"fno": "536500a167",
"keywords": [
"Computer Games",
"Mobile Computing",
"Software Prototyping",
"Sport",
"AR Application Development Process",
"AR Prototyping",
"Different Prototypes",
"Extended Reality Prototyping",
"Flexible XR Prototyping Framework",
"Improved XR Prototyping Experience",
"More Effortless XR Prototyping Experience",
"Off Site Development",
"On Site Access",
"On Site AR Sports",
"Sports Spectating Example",
"XR Prototyping Process",
"Extended Reality",
"Prototypes",
"Sparks",
"X Reality",
"Sports"
],
"authors": [
{
"affiliation": "University of Otago",
"fullName": "Wei Hong Lo",
"givenName": "Wei Hong",
"surname": "Lo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Otago",
"fullName": "Holger Regenbrecht",
"givenName": "Holger",
"surname": "Regenbrecht",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Otago",
"fullName": "Stefanie Zollmann",
"givenName": "Stefanie",
"surname": "Zollmann",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-10-01T00:00:00",
"pubType": "proceedings",
"pages": "167-170",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-5365-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "536500a161",
"articleId": "1J7WaMjfuuc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "536500a171",
"articleId": "1J7WiiRgyyI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/aivr/2018/9269/0/926900a258",
"title": "Omni-Learning XR Technologies and Visitor-Centered Experience in the Smart Art Museum",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2018/926900a258/17D45WODasM",
"parentPublication": {
"id": "proceedings/aivr/2018/9269/0",
"title": "2018 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sec/2021/8390/0/839000a465",
"title": "Poster: Enabling Flexible Edge-assisted XR",
"doi": null,
"abstractUrl": "/proceedings-article/sec/2021/839000a465/1B2HcrybLCE",
"parentPublication": {
"id": "proceedings/sec/2021/8390/0",
"title": "2021 IEEE/ACM Symposium on Edge Computing (SEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a710",
"title": "Ex-Cit XR: Expert-elicitation of XR Techniques for Disengaging from IVEs",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a710/1J7WgdWP768",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mass/2022/7180/0/718000a131",
"title": "Emulating Your eXtended World: An Emulation Environment for XR App Development",
"doi": null,
"abstractUrl": "/proceedings-article/mass/2022/718000a131/1JeEl77BoRy",
"parentPublication": {
"id": "proceedings/mass/2022/7180/0",
"title": "2022 IEEE 19th International Conference on Mobile Ad Hoc and Smart Systems (MASS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a548",
"title": "XRtic: A Prototyping Toolkit for XR Applications using Cloth Deformation",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a548/1JrQZCCdOIo",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049665",
"title": "Text Input for Non-Stationary XR Workspaces: Investigating Tap and Word-Gesture Keyboards in Virtual and Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049665/1KYooqYQbF6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2022/5725/0/572500a104",
"title": "XR Management Training Simulator supported by Content-Based scenario recommendation",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2022/572500a104/1KmF8tEedk4",
"parentPublication": {
"id": "proceedings/aivr/2022/5725/0",
"title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2023/01/10035734",
"title": "The Ball is in Our Court: Conducting Visualization Research With Sports Experts",
"doi": null,
"abstractUrl": "/magazine/cg/2023/01/10035734/1KrcgLSqCUE",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2023/4839/0/483900a067",
"title": "IEEE VR 2023 Workshop: Datasets for developing intelligent XR applications (DATA4XR)",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2023/483900a067/1N0wLk9I85W",
"parentPublication": {
"id": "proceedings/vrw/2023/4839/null",
"title": "2023 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a439",
"title": "XR Mobility Platform: Multi-Modal XR System Mounted on Autonomous Vehicle for Passenger’s Comfort Improvement",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a439/1yeQPu8aFlm",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1KmF7rVz6Y8",
"title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"acronym": "aivr",
"groupId": "1830004",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1KmF8tEedk4",
"doi": "10.1109/AIVR56993.2022.00021",
"title": "XR Management Training Simulator supported by Content-Based scenario recommendation",
"normalizedTitle": "XR Management Training Simulator supported by Content-Based scenario recommendation",
"abstract": "Extended Reality (XR) is actively bringing innovative opportunities to business, from Virtual Reality (VR) assistants to Augmented Reality (AR) sales advertisements. However, there is currently a lack of adaptive profiling in training simulators, limiting their potential, effectiveness, dynamicity, and customization. This paper proposes the use of a Content-Based Recommender System integrated into an XR simulator to predict training scenarios for the user. Although customization can be achieved in many ways, this project focuses on in-game input data: user interaction with virtual objects, decisions made by the user, and metadata associated with the training scenario (plot keywords, characters involved in the scenarios, objects metadata, scenario category and room in which the scenario takes place). The results of the user study show that the participants enjoyed the simulation and preferred a dynamic and customized training narrative compared to a standard linear one. This innovative approach will enable in the future the automation of personalized training simulations, creating targeted and individual XR experiences for each user.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Extended Reality (XR) is actively bringing innovative opportunities to business, from Virtual Reality (VR) assistants to Augmented Reality (AR) sales advertisements. However, there is currently a lack of adaptive profiling in training simulators, limiting their potential, effectiveness, dynamicity, and customization. This paper proposes the use of a Content-Based Recommender System integrated into an XR simulator to predict training scenarios for the user. Although customization can be achieved in many ways, this project focuses on in-game input data: user interaction with virtual objects, decisions made by the user, and metadata associated with the training scenario (plot keywords, characters involved in the scenarios, objects metadata, scenario category and room in which the scenario takes place). The results of the user study show that the participants enjoyed the simulation and preferred a dynamic and customized training narrative compared to a standard linear one. This innovative approach will enable in the future the automation of personalized training simulations, creating targeted and individual XR experiences for each user.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Extended Reality (XR) is actively bringing innovative opportunities to business, from Virtual Reality (VR) assistants to Augmented Reality (AR) sales advertisements. However, there is currently a lack of adaptive profiling in training simulators, limiting their potential, effectiveness, dynamicity, and customization. This paper proposes the use of a Content-Based Recommender System integrated into an XR simulator to predict training scenarios for the user. Although customization can be achieved in many ways, this project focuses on in-game input data: user interaction with virtual objects, decisions made by the user, and metadata associated with the training scenario (plot keywords, characters involved in the scenarios, objects metadata, scenario category and room in which the scenario takes place). The results of the user study show that the participants enjoyed the simulation and preferred a dynamic and customized training narrative compared to a standard linear one. This innovative approach will enable in the future the automation of personalized training simulations, creating targeted and individual XR experiences for each user.",
"fno": "572500a104",
"keywords": [
"Augmented Reality",
"Computer Based Training",
"Computer Games",
"Digital Simulation",
"Meta Data",
"Recommender Systems",
"Virtual Reality",
"Adaptive Profiling",
"Augmented Reality Sales Advertisements",
"Content Based Recommender System",
"Content Based Scenario Recommendation",
"Customized Training Narrative",
"Dynamic Training Narrative",
"Dynamicity",
"Extended Reality",
"In Game Input Data",
"Individual XR Experiences",
"Innovative Opportunities",
"Objects Metadata",
"Personalized Training Simulations",
"Room In Which The Scenario Takes Place",
"Scenario Category",
"Targeted XR Experiences",
"Training Scenario",
"Training Simulators",
"User Interaction",
"User Study Show",
"Virtual Objects",
"Virtual Reality Assistants",
"XR Management Training Simulator",
"XR Simulator",
"Training",
"Solid Modeling",
"Project Management",
"Prototypes",
"Metadata",
"User Experience",
"X Reality",
"Extended Reality",
"Virtual Reality",
"Artificial Intelligence",
"Project Management",
"Storytelling",
"Training",
"Recommender Systems"
],
"authors": [
{
"affiliation": "Swinburne University of Technology,Centre For Transformative Media Technologies,Melbourne,Australia",
"fullName": "Irene Gironacci",
"givenName": "Irene",
"surname": "Gironacci",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "aivr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-12-01T00:00:00",
"pubType": "proceedings",
"pages": "104-108",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-5725-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "572500a099",
"articleId": "1KmFeG8t5Yc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "572500a109",
"articleId": "1KmFcUFPF3G",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/sec/2021/8390/0/839000a465",
"title": "Poster: Enabling Flexible Edge-assisted XR",
"doi": null,
"abstractUrl": "/proceedings-article/sec/2021/839000a465/1B2HcrybLCE",
"parentPublication": {
"id": "proceedings/sec/2021/8390/0",
"title": "2021 IEEE/ACM Symposium on Edge Computing (SEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a243",
"title": "Web XR User Interface Study in Designing 3D Layout Framework in Static Websites",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a243/1CJcCZLUE5q",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a322",
"title": "Extended Reality Training for Business and Education: The New Generation of Learning Experiences",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a322/1J7W77jxOlq",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a710",
"title": "Ex-Cit XR: Expert-elicitation of XR Techniques for Disengaging from IVEs",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a710/1J7WgdWP768",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a167",
"title": "Flexible XR Prototyping – A Sports Spectating Example",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a167/1J7WuYXm6kg",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mass/2022/7180/0/718000a131",
"title": "Emulating Your eXtended World: An Emulation Environment for XR App Development",
"doi": null,
"abstractUrl": "/proceedings-article/mass/2022/718000a131/1JeEl77BoRy",
"parentPublication": {
"id": "proceedings/mass/2022/7180/0",
"title": "2022 IEEE 19th International Conference on Mobile Ad Hoc and Smart Systems (MASS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a548",
"title": "XRtic: A Prototyping Toolkit for XR Applications using Cloth Deformation",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a548/1JrQZCCdOIo",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049665",
"title": "Text Input for Non-Stationary XR Workspaces: Investigating Tap and Word-Gesture Keyboards in Virtual and Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049665/1KYooqYQbF6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2023/4839/0/483900a067",
"title": "IEEE VR 2023 Workshop: Datasets for developing intelligent XR applications (DATA4XR)",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2023/483900a067/1N0wLk9I85W",
"parentPublication": {
"id": "proceedings/vrw/2023/4839/null",
"title": "2023 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a057",
"title": "Extended by Design: A Toolkit for Creation of XR Experiences",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a057/1pBMiZ4INTG",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jIxhEnA8IE",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxrXeGmdi",
"doi": "10.1109/VRW50115.2020.00144",
"title": "XR Framework for Collaborating Remote Heterogeneous Devices",
"normalizedTitle": "XR Framework for Collaborating Remote Heterogeneous Devices",
"abstract": "With the advent of high-speed 5G networks, establishing a remote space-sharing XR environment that supports real-time interaction based on the latest VR/AR/MR technology is receiving attention as a promising new research topic. With that aim, we have developed a framework that allows users with different types of devices, in different physical spaces, to effectively build a virtual world where collaborative work can be performed. By using this technology, new types of content can be effectively produced that can interact in real time with various commercial VR/MR head-mounted displays, smart mobile devices, and even beam projectors. In this paper, we demonstrate the effectiveness and potential of this framework through the example of game content development where MR and VR head-mounted display users interact in virtual space.",
"abstracts": [
{
"abstractType": "Regular",
"content": "With the advent of high-speed 5G networks, establishing a remote space-sharing XR environment that supports real-time interaction based on the latest VR/AR/MR technology is receiving attention as a promising new research topic. With that aim, we have developed a framework that allows users with different types of devices, in different physical spaces, to effectively build a virtual world where collaborative work can be performed. By using this technology, new types of content can be effectively produced that can interact in real time with various commercial VR/MR head-mounted displays, smart mobile devices, and even beam projectors. In this paper, we demonstrate the effectiveness and potential of this framework through the example of game content development where MR and VR head-mounted display users interact in virtual space.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "With the advent of high-speed 5G networks, establishing a remote space-sharing XR environment that supports real-time interaction based on the latest VR/AR/MR technology is receiving attention as a promising new research topic. With that aim, we have developed a framework that allows users with different types of devices, in different physical spaces, to effectively build a virtual world where collaborative work can be performed. By using this technology, new types of content can be effectively produced that can interact in real time with various commercial VR/MR head-mounted displays, smart mobile devices, and even beam projectors. In this paper, we demonstrate the effectiveness and potential of this framework through the example of game content development where MR and VR head-mounted display users interact in virtual space.",
"fno": "09090571",
"keywords": [
"Virtual Reality",
"Real Time Systems",
"Games",
"Resists",
"Conferences",
"Three Dimensional Displays",
"Performance Evaluation",
"Human Centered Computing",
"Human Computer Interaction",
"Interaction Paradigms",
"Mixed Augmented Reality",
"Human Centered Computing",
"Human Computer Interaction",
"Interaction Paradigms",
"Virtual Reality"
],
"authors": [
{
"affiliation": "Dongguk University,Dept. of Multimedia,Korea",
"fullName": "Jongyong Kim",
"givenName": "Jongyong",
"surname": "Kim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dongguk University,Dept. of Multimedia,Korea",
"fullName": "Jonghoon Song",
"givenName": "Jonghoon",
"surname": "Song",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sogang University,Dept. of Computer Sci. and Eng.,Korea",
"fullName": "Woong Seo",
"givenName": "Woong",
"surname": "Seo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sogang University,Dept. of Computer Sci. and Eng.,Korea",
"fullName": "Insung Ihm",
"givenName": "Insung",
"surname": "Ihm",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dongguk University,Dept. of Multimedia Eng.,Korea",
"fullName": "Seung-Hyun Yoon",
"givenName": "Seung-Hyun",
"surname": "Yoon",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dongguk University,Dept. of Multimedia,Korea",
"fullName": "Sanghun Park",
"givenName": "Sanghun",
"surname": "Park",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "586-587",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6532-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09090628",
"articleId": "1jIxpR1CPOo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09090466",
"articleId": "1jIxjlFlp0k",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2018/3365/0/08445841",
"title": "Demonstration of Olfactory Display Based on Sniffing Action",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08445841/13bd1eTtWYE",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446551",
"title": "A Demonstration of ShareVR: Co-Located Experiences for Virtual Reality Between HMD and Non-HMD Users",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446551/13bd1gzWkQD",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2018/7447/0/744701a946",
"title": "Exploring the Effects of Multimedia Design in a Life English VR Serious Game",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2018/744701a946/19m3JG8atEI",
"parentPublication": {
"id": "proceedings/iiai-aai/2018/7447/0",
"title": "2018 7th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a812",
"title": "Tangiball: Foot-Enabled Embodied Tangible Interaction with a Ball in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a812/1CJczvrAl0Y",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797828",
"title": "Immersive Virtual Reality and Gamification Within Procedurally Generated Environments to Increase Motivation During Gait Rehabilitation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797828/1cJ13n6aEsE",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798128",
"title": "Supporting Visual Annotation Cues in a Live 360 Panorama-based Mixed Reality Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798128/1cJ1aXJnUyI",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090580",
"title": "A Study on the Effects of Head Mounted Displays Movement and Image Movement on Virtual Reality Sickness",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090580/1jIxns5TwxG",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090421",
"title": "Analysis of Interaction Spaces for VR in Public Transport Systems",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090421/1jIxr9dj52o",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a051",
"title": "Exploring Virtual Environments by Visually Impaired Using a Mixed Reality Cane Without Visual Feedback",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a051/1pBMgh7AbaU",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a434",
"title": "A-Visor and A-Camera: Arduino-based Cardboard Head-Mounted Controllers for VR Games",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a434/1tnWy6iYjMk",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1yfxDjRGMmc",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yeQPu8aFlm",
"doi": "10.1109/ISMAR-Adjunct54149.2021.00100",
"title": "XR Mobility Platform: Multi-Modal XR System Mounted on Autonomous Vehicle for Passenger’s Comfort Improvement",
"normalizedTitle": "XR Mobility Platform: Multi-Modal XR System Mounted on Autonomous Vehicle for Passenger’s Comfort Improvement",
"abstract": "This paper introduces a multimodal XR mobility system mounted on an autonomous vehicle, which consists of immersive displays including a cylindrical screen or HMD and a motion platform in order to improve a passenger’s comfort. It is expected that the interior environment surrounding passengers will change dramatically when autonomous vehicles are realized in the near future. For example, since the driver is freed from driving he or she becomes a passenger without steering authority, and the windshield and windows are turned into information screens. The goal of this research is to develop technology to improve passenger’s comfort during auto-driving using the XR mobility platform, which is a multimodal VR/AR system with a tilt controllable seat mounted on an autonomous vehicle. This paper introduces the configuration of the XR mobility platform and proposes a movement sense control method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper introduces a multimodal XR mobility system mounted on an autonomous vehicle, which consists of immersive displays including a cylindrical screen or HMD and a motion platform in order to improve a passenger’s comfort. It is expected that the interior environment surrounding passengers will change dramatically when autonomous vehicles are realized in the near future. For example, since the driver is freed from driving he or she becomes a passenger without steering authority, and the windshield and windows are turned into information screens. The goal of this research is to develop technology to improve passenger’s comfort during auto-driving using the XR mobility platform, which is a multimodal VR/AR system with a tilt controllable seat mounted on an autonomous vehicle. This paper introduces the configuration of the XR mobility platform and proposes a movement sense control method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper introduces a multimodal XR mobility system mounted on an autonomous vehicle, which consists of immersive displays including a cylindrical screen or HMD and a motion platform in order to improve a passenger’s comfort. It is expected that the interior environment surrounding passengers will change dramatically when autonomous vehicles are realized in the near future. For example, since the driver is freed from driving he or she becomes a passenger without steering authority, and the windshield and windows are turned into information screens. The goal of this research is to develop technology to improve passenger’s comfort during auto-driving using the XR mobility platform, which is a multimodal VR/AR system with a tilt controllable seat mounted on an autonomous vehicle. This paper introduces the configuration of the XR mobility platform and proposes a movement sense control method.",
"fno": "129800a439",
"keywords": [
"Automotive Components",
"Helmet Mounted Displays",
"Human Factors",
"Railway Safety",
"Road Vehicles",
"Seats",
"Virtual Reality",
"Multimodal XR Mobility System",
"Autonomous Vehicle",
"Motion Platform",
"Passenger",
"Interior Environment Surrounding Passengers",
"XR Mobility Platform",
"Multimodal XR System Mounted",
"Resists",
"X Reality",
"Autonomous Vehicles",
"Automotive Components",
"Vehicles",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Paradigms",
"Mixed Augmented Reality"
],
"authors": [
{
"affiliation": "Nara Institute of Science and Technology",
"fullName": "Taishi Sawabe",
"givenName": "Taishi",
"surname": "Sawabe",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nara Institute of Science and Technology",
"fullName": "Masayuki Kanbara",
"givenName": "Masayuki",
"surname": "Kanbara",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nara Institute of Science and Technology",
"fullName": "Yuichiro Fujimoto",
"givenName": "Yuichiro",
"surname": "Fujimoto",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nara Institute of Science and Technology",
"fullName": "Hirokazu Kato",
"givenName": "Hirokazu",
"surname": "Kato",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "439-440",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-1298-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1yeQPndPY40",
"name": "pismar-adjunct202112980-09585837s1-mm_129800a439.zip",
"size": "14.2 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pismar-adjunct202112980-09585837s1-mm_129800a439.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "129800a437",
"articleId": "1yeQD8KNChO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "129800a441",
"articleId": "1yeQDAAUmg8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icicta/2010/4077/2/4077c880",
"title": "Passenger-Sharing Rate Model of New Expressway under Integrated Passenger Transport Channel",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2010/4077c880/12OmNCwUmBT",
"parentPublication": {
"id": "proceedings/icicta/2010/4077/2",
"title": "Intelligent Computation Technology and Automation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2014/2871/0/06802115",
"title": "Virtual reality: Improving passenger comfort in future flights",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802115/12OmNvRU0j9",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836521",
"title": "Diminished Reality for Acceleration — Motion Sickness Reduction with Vection for Autonomous Driving",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836521/12OmNyo1nR0",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699194",
"title": "Comfort Intelligence for Autonomous Vehicles",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699194/19F1NbD5DMs",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699182",
"title": "International Workshop on Comfort Intelligence with AR for Autonomous Vehicle 2018",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699182/19F1R26pH0Y",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699226",
"title": "A Virtual Boarding System of an Autonomous Vehicle for Investigating the Effect of an AR Display on Passenger Comfort",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699226/19F1TgkuQaQ",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvisp/2021/0770/0/077000a066",
"title": "Identifying Factors to Improve the Coach Design and Service of the New-type EMU Sleeper Train: Findings from Passenger Satisfaction Survey",
"doi": null,
"abstractUrl": "/proceedings-article/icvisp/2021/077000a066/1APq9V9m1oI",
"parentPublication": {
"id": "proceedings/icvisp/2021/0770/0",
"title": "2021 5th International Conference on Vision, Image and Signal Processing (ICVISP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a850",
"title": "Toward Understanding the Effects of Visual and Tactile Stimuli to Reduce the Sensation of Movement with XR Mobility Platform",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a850/1CJdU6KHU3u",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a348",
"title": "Diminished Reality for Sense of Movement with XR Mobility Platform",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a348/1J7WmlCrYUo",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a497",
"title": "Co-Drive: the experience of a shared car trip between a driver and a remote passenger",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a497/1yfxI7RFmNy",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzVXNJh",
"title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)",
"acronym": "3dui",
"groupId": "1001623",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzBwGKT",
"doi": "10.1109/3DUI.2015.7131744",
"title": "Haptic ChairIO: A system to study the effect of wind and floor vibration feedback on spatial orientation in VEs",
"normalizedTitle": "Haptic ChairIO: A system to study the effect of wind and floor vibration feedback on spatial orientation in VEs",
"abstract": "In this poster, we present the design, implementation, and evaluation plan of a system called Haptic ChairIO. A design space is first introduced, classifying sensory cues, and describing the potential usage of haptic cues on cognitive tasks in virtual environments (VEs). Then follows the design and implementation of Haptic ChairIO, which is extendable in providing various sensory cue types, consisting of a VR simulation, chair-based motion-control input, and multi-sensory output, including visual, audio, wind, and floor vibration feedback. A plan of evaluation has been made to study the effect of wind and floor vibration on spatial orientation in VEs.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this poster, we present the design, implementation, and evaluation plan of a system called Haptic ChairIO. A design space is first introduced, classifying sensory cues, and describing the potential usage of haptic cues on cognitive tasks in virtual environments (VEs). Then follows the design and implementation of Haptic ChairIO, which is extendable in providing various sensory cue types, consisting of a VR simulation, chair-based motion-control input, and multi-sensory output, including visual, audio, wind, and floor vibration feedback. A plan of evaluation has been made to study the effect of wind and floor vibration on spatial orientation in VEs.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this poster, we present the design, implementation, and evaluation plan of a system called Haptic ChairIO. A design space is first introduced, classifying sensory cues, and describing the potential usage of haptic cues on cognitive tasks in virtual environments (VEs). Then follows the design and implementation of Haptic ChairIO, which is extendable in providing various sensory cue types, consisting of a VR simulation, chair-based motion-control input, and multi-sensory output, including visual, audio, wind, and floor vibration feedback. A plan of evaluation has been made to study the effect of wind and floor vibration on spatial orientation in VEs.",
"fno": "07131744",
"keywords": [
"Vibrations",
"Floors",
"Haptic Interfaces",
"Visualization",
"Virtual Environments",
"Three Dimensional Displays",
"Software",
"Floor Vibration",
"3 D Display",
"Sensory Feedback",
"Wind"
],
"authors": [
{
"affiliation": "Worcester Polytechnic Institute, USA",
"fullName": "Mi Feng",
"givenName": "Mi",
"surname": "Feng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Worcester Polytechnic Institute, USA",
"fullName": "Robert W. Lindeman",
"givenName": "Robert W.",
"surname": "Lindeman",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ExxonMobil Research Qatar, Qatar",
"fullName": "Hazem Abdel-Moati",
"givenName": "Hazem",
"surname": "Abdel-Moati",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Worcester Polytechnic Institute, USA",
"fullName": "Jacob C. Lindeman",
"givenName": "Jacob C.",
"surname": "Lindeman",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dui",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-03-01T00:00:00",
"pubType": "proceedings",
"pages": "149-150",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-6886-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07131743",
"articleId": "12OmNAXPynJ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07131745",
"articleId": "12OmNymjN0q",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/Ismar-mashd/2015/9628/0/9628a051",
"title": "A Novel Haptic Vibration Media and Its Application",
"doi": null,
"abstractUrl": "/proceedings-article/Ismar-mashd/2015/9628a051/12OmNCgJe7j",
"parentPublication": {
"id": "proceedings/Ismar-mashd/2015/9628/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality - Media, Art, Social Science, Humanities and Design (ISMAR-MASH'D)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2016/0842/0/07460037",
"title": "An initial exploration of a multi-sensory design space: Tactile support for walking in immersive virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460037/12OmNrYCXTx",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2009/3943/0/04810990",
"title": "Spatialized Haptic Rendering: Providing Impact Position Information in 6DOF Haptic Simulations Using Vibrations",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04810990/12OmNxw5BnV",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isvri/2011/0054/0/05759662",
"title": "Pseudo-haptic feedback augmented with visual and tactile vibrations",
"doi": null,
"abstractUrl": "/proceedings-article/isvri/2011/05759662/12OmNzvz6OE",
"parentPublication": {
"id": "proceedings/isvri/2011/0054/0",
"title": "2011 IEEE International Symposium on VR Innovation (ISVRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2011/02/tth2011020134",
"title": "What you can't feel won't hurt you: Evaluating haptic hardware using a haptic contrast sensitivity function",
"doi": null,
"abstractUrl": "/journal/th/2011/02/tth2011020134/13rRUxly8T9",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2014/04/06915735",
"title": "Force Control Tasks with Pure Haptic Feedback Promote Short-Term Focused Attention",
"doi": null,
"abstractUrl": "/journal/th/2014/04/06915735/13rRUy0qnGt",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2009/03/tth2009030148",
"title": "Touch Is Everywhere: Floor Surfaces as Ambient Haptic Interfaces",
"doi": null,
"abstractUrl": "/journal/th/2009/03/tth2009030148/13rRUygBwhO",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/11/08756096",
"title": "Impact of Different Sensory Stimuli on Presence in Credible Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/2020/11/08756096/1bpYGVRBVYc",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797878",
"title": "A Multidirectional Haptic Feedback Prototype for Experiencing Collisions Between Virtual and Real Objects",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797878/1cJ0I4GtxhC",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798266",
"title": "Towards EEG-Based Haptic Interaction within Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798266/1cJ13SHk4dW",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1J7W6LmbCw0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "9973799",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1J7WrPcWIVO",
"doi": "10.1109/ISMAR-Adjunct57072.2022.00198",
"title": "Haptics in VR Using Origami-Augmented Drones",
"normalizedTitle": "Haptics in VR Using Origami-Augmented Drones",
"abstract": "Virtual reality (VR) aims to make the human-computer interaction experience more immersive. Without the sensation of force and proper haptic feedback, however, the illusion of presence often breaks when a user tries to touch a virtual object. We present a prototype that provides haptic feedback in VR using origami carried by a drone. The drone delivers origami to the user's hand when that user is about to touch a virtual object. The haptic experience can easily be modified by changing the origami to other shapes and paper types. Our work demonstrates a novel, customisable, and low-cost solution to enable VR haptics.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual reality (VR) aims to make the human-computer interaction experience more immersive. Without the sensation of force and proper haptic feedback, however, the illusion of presence often breaks when a user tries to touch a virtual object. We present a prototype that provides haptic feedback in VR using origami carried by a drone. The drone delivers origami to the user's hand when that user is about to touch a virtual object. The haptic experience can easily be modified by changing the origami to other shapes and paper types. Our work demonstrates a novel, customisable, and low-cost solution to enable VR haptics.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual reality (VR) aims to make the human-computer interaction experience more immersive. Without the sensation of force and proper haptic feedback, however, the illusion of presence often breaks when a user tries to touch a virtual object. We present a prototype that provides haptic feedback in VR using origami carried by a drone. The drone delivers origami to the user's hand when that user is about to touch a virtual object. The haptic experience can easily be modified by changing the origami to other shapes and paper types. Our work demonstrates a novel, customisable, and low-cost solution to enable VR haptics.",
"fno": "536500a905",
"keywords": [
"Force Feedback",
"Haptic Interfaces",
"Interactive Devices",
"Virtual Reality",
"Drone",
"Haptic Experience",
"Human Computer Interaction Experience",
"Origami Augmented Drones",
"Proper Haptic Feedback",
"Virtual Object",
"Virtual Reality",
"VR Haptics",
"Human Computer Interaction",
"Shape",
"Force",
"Prototypes",
"Haptic Interfaces",
"Augmented Reality",
"Drones",
"Drone",
"Encountered Type Haptics",
"Immersion",
"Origami",
"Paper Folding",
"Quadcopter",
"Tactile Feedback",
"Virtual Reality",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Paradigms",
"Interaction Devices",
"Haptic Devices"
],
"authors": [
{
"affiliation": "University of Melbourne,Australia",
"fullName": "Difeng Yu",
"givenName": "Difeng",
"surname": "Yu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Melbourne,Australia",
"fullName": "Weiwei Jiang",
"givenName": "Weiwei",
"surname": "Jiang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Melbourne,Australia",
"fullName": "Andrew Irlitti",
"givenName": "Andrew",
"surname": "Irlitti",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Melbourne,Australia",
"fullName": "Tilman Dingler",
"givenName": "Tilman",
"surname": "Dingler",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Melbourne,Australia",
"fullName": "Eduardo Velloso",
"givenName": "Eduardo",
"surname": "Velloso",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Melbourne,Australia",
"fullName": "Jorge Goncalves",
"givenName": "Jorge",
"surname": "Goncalves",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Melbourne,Australia",
"fullName": "Vassilis Kostakos",
"givenName": "Vassilis",
"surname": "Kostakos",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-10-01T00:00:00",
"pubType": "proceedings",
"pages": "905-906",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-5365-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "536500a903",
"articleId": "1J7WhyM0jfi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "536500a907",
"articleId": "1J7Whap3vRS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmcs/1999/0253/1/02539195",
"title": "Haptics in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/icmcs/1999/02539195/12OmNyQ7G3s",
"parentPublication": {
"id": "proceedings/icmcs/1999/0253/1",
"title": "Multimedia Computing and Systems, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-amh/2012/4663/0/06484001",
"title": "Augmented Reverse-Origami: from 3D model to square paper",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-amh/2012/06484001/12OmNzahc3h",
"parentPublication": {
"id": "proceedings/ismar-amh/2012/4663/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality - Arts, Media, and Humanities (ISMAR-AMH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446128",
"title": "Rendering of Pressure and Textures Using Wearable Haptics in Immersive VR Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446128/13bd1eSlyt0",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446619",
"title": "Touchless Haptic Feedback for VR Rhythm Games",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446619/13bd1fKQxqX",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446522",
"title": "Touchless Haptic Feedback for Supernatural VR Experiences",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446522/13bd1fWcuDF",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08007246",
"title": "AR Feels “Softer” than VR: Haptic Perception of Stiffness in Augmented versus Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08007246/13rRUwh80Hj",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a727",
"title": "Phantom Touch phenomenon as a manifestation of the Visual-Auditory-Tactile Synaesthesia and its impact on the users in virtual reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a727/1J7WmsXKzxS",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a748",
"title": "Wormholes in VR: Teleporting Hands for Flexible Passive Haptics",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a748/1JrR93EDicE",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nicoint/2021/3954/0/395400a090",
"title": "A VR Experience of Being Warmly Swaddled Using Otonamaki and Haptics Device",
"doi": null,
"abstractUrl": "/proceedings-article/nicoint/2021/395400a090/1wnPt2POtPi",
"parentPublication": {
"id": "proceedings/nicoint/2021/3954/0",
"title": "2021 Nicograph International (NicoInt)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800z018",
"title": "Keynote Speaker: Wearable Haptics for Virtual and Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800z018/1yeD29pZAsw",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tGcgESIWpa",
"title": "2020 9th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"acronym": "iiai-aai",
"groupId": "1801921",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1tGcjlaxzMs",
"doi": "10.1109/IIAI-AAI50415.2020.00097",
"title": "Development of Touch Valve UI with pseudo-haptics feedback based on vibration of tablet PC",
"normalizedTitle": "Development of Touch Valve UI with pseudo-haptics feedback based on vibration of tablet PC",
"abstract": "Once an accident occurs, it will cause serious damage to the surroundings and loss of life and social trust.Therefore, researchers studies for training system using virtual reality (VR) technology. The advantage of VR training is that it can reproduce accidents in operating plants that cannot occur in the real world, and trainees can train how to deal with them. VR training requires immersion to make it more effective. However, there is not currently convenient interface in VR training such as a valve used in industrial plants. Most common interface in industrial plants is valve so that if there is gap between training interface and actual interface, user cannot obtain immersion, for example, in case that mouse or keyboard of PC are interface for training system.To solve this problem, we aim to develop touch valve user interface (UI) which operability and feedback are close to actual valve by using touch panel and vibration presentation instead of reaction force as pseudo-haptic feedback. That operability and feedback near to actual valve will cause user perceptual illusion and can keep them immerse in VR environment.This paper explains our think why the operability and feedback of our valve UI can approach to that of actual valve, and the result of questionnaire which asked whether the operability and feedback of our valve is near that of an actual valve when vibration generates or not. The result of the Student’s t-test for paired data was df = 12, t = −4.382, P = 0.001 (<5%). Calculation of the t-test confirmed that the vibration that our valve presented is significant for user to percept feedback like that from an actual valve.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Once an accident occurs, it will cause serious damage to the surroundings and loss of life and social trust.Therefore, researchers studies for training system using virtual reality (VR) technology. The advantage of VR training is that it can reproduce accidents in operating plants that cannot occur in the real world, and trainees can train how to deal with them. VR training requires immersion to make it more effective. However, there is not currently convenient interface in VR training such as a valve used in industrial plants. Most common interface in industrial plants is valve so that if there is gap between training interface and actual interface, user cannot obtain immersion, for example, in case that mouse or keyboard of PC are interface for training system.To solve this problem, we aim to develop touch valve user interface (UI) which operability and feedback are close to actual valve by using touch panel and vibration presentation instead of reaction force as pseudo-haptic feedback. That operability and feedback near to actual valve will cause user perceptual illusion and can keep them immerse in VR environment.This paper explains our think why the operability and feedback of our valve UI can approach to that of actual valve, and the result of questionnaire which asked whether the operability and feedback of our valve is near that of an actual valve when vibration generates or not. The result of the Student’s t-test for paired data was df = 12, t = −4.382, P = 0.001 (<5%). Calculation of the t-test confirmed that the vibration that our valve presented is significant for user to percept feedback like that from an actual valve.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Once an accident occurs, it will cause serious damage to the surroundings and loss of life and social trust.Therefore, researchers studies for training system using virtual reality (VR) technology. The advantage of VR training is that it can reproduce accidents in operating plants that cannot occur in the real world, and trainees can train how to deal with them. VR training requires immersion to make it more effective. However, there is not currently convenient interface in VR training such as a valve used in industrial plants. Most common interface in industrial plants is valve so that if there is gap between training interface and actual interface, user cannot obtain immersion, for example, in case that mouse or keyboard of PC are interface for training system.To solve this problem, we aim to develop touch valve user interface (UI) which operability and feedback are close to actual valve by using touch panel and vibration presentation instead of reaction force as pseudo-haptic feedback. That operability and feedback near to actual valve will cause user perceptual illusion and can keep them immerse in VR environment.This paper explains our think why the operability and feedback of our valve UI can approach to that of actual valve, and the result of questionnaire which asked whether the operability and feedback of our valve is near that of an actual valve when vibration generates or not. The result of the Student’s t-test for paired data was df = 12, t = −4.382, P = 0.001 (<5%). Calculation of the t-test confirmed that the vibration that our valve presented is significant for user to percept feedback like that from an actual valve.",
"fno": "739700a456",
"keywords": [
"Computer Based Training",
"Haptic Interfaces",
"Human Computer Interaction",
"Valves",
"Vibrations",
"Virtual Reality",
"Training System",
"VR Training",
"Operating Plants",
"Industrial Plants",
"Training Interface",
"Touch Valve User Interface",
"Pseudohaptic Feedback",
"VR Environment",
"Touch Valve UI Development",
"Virtual Reality Technology",
"Touch Panel",
"Vibration Presentation",
"Student T Test",
"Reaction Force",
"User Perceptual Illusion",
"Training",
"Vibrations",
"Force",
"Virtual Reality",
"User Interfaces",
"Valves",
"Software",
"Pseudo Haptics",
"Vibration",
"Virtual Reality",
"Cross Modal",
"User Interface",
"Immersion",
"Touch Panel",
"Smartphone",
"Tablet PC",
"Human Computer Interaction HCI"
],
"authors": [
{
"affiliation": "Okayama Shoka University,Faculty of Business Administration,Okayama,Japan",
"fullName": "Hirotsugu Minowa",
"givenName": "Hirotsugu",
"surname": "Minowa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Okayama Shoka University,Faculty of Business Administration,Okayama,Japan",
"fullName": "Chujia Zhang",
"givenName": "Chujia",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iiai-aai",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-09-01T00:00:00",
"pubType": "proceedings",
"pages": "456-462",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-7397-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "739700a450",
"articleId": "1tGcqncXL32",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "739700a463",
"articleId": "1tGcrlrC7pS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icicta/2014/6636/0/6636a272",
"title": "Flow Induced Vibration Analysis of the New Two-Way Zero Leakage Large Diameter Electromagnetic Drive Valve",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2014/6636a272/12OmNrkT7Gj",
"parentPublication": {
"id": "proceedings/icicta/2014/6636/0",
"title": "2014 7th International Conference on Intelligent Computation Technology and Automation (ICICTA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icfpee/2010/7378/0/05663352",
"title": "Study on Modeling of Electro-Hydraulic Variable Valve Mechanism Based on CATIA",
"doi": null,
"abstractUrl": "/proceedings-article/icfpee/2010/05663352/12OmNybfr0P",
"parentPublication": {
"id": "proceedings/icfpee/2010/7378/0",
"title": "2010 International Conference on Future Power and Energy Engineering (ICFPEE 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdma/2012/4772/0/4772a675",
"title": "Research on Dynamic Characteristics of the Rotary Valve",
"doi": null,
"abstractUrl": "/proceedings-article/icdma/2012/4772a675/12OmNyoSbge",
"parentPublication": {
"id": "proceedings/icdma/2012/4772/0",
"title": "2012 Third International Conference on Digital Manufacturing & Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2014/6636/0/6636a214",
"title": "Dynamic Characterization Research of the New Type Zero Leakage Valve with Double Direction",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2014/6636a214/12OmNzdoMQx",
"parentPublication": {
"id": "proceedings/icicta/2014/6636/0",
"title": "2014 7th International Conference on Intelligent Computation Technology and Automation (ICICTA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a905",
"title": "Haptics in VR Using Origami-Augmented Drones",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a905/1J7WrPcWIVO",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a748",
"title": "Wormholes in VR: Teleporting Hands for Flexible Passive Haptics",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a748/1JrR93EDicE",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10054238",
"title": "GroundFlow: Liquid-based Haptics for Simulating Fluid on the Ground in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10054238/1L6HOIvywcU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icris/2019/2632/0/263200a447",
"title": "Reliability Analysis of Valve Mechanism for Ship Gun Shell Valve",
"doi": null,
"abstractUrl": "/proceedings-article/icris/2019/263200a447/1cI6qBLfOZq",
"parentPublication": {
"id": "proceedings/icris/2019/2632/0",
"title": "2019 International Conference on Robots & Intelligent System (ICRIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797762",
"title": "A New Interactive Haptic Device for Getting Physical Contact Feeling of Virtual Objects",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797762/1cJ0ToWEx9K",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ifeea/2020/9627/0/962700a161",
"title": "Research on the Influence of Throttle Valve Stiffness on Damping Characteristics of Shock Absorber",
"doi": null,
"abstractUrl": "/proceedings-article/ifeea/2020/962700a161/1rvCEQsrkhG",
"parentPublication": {
"id": "proceedings/ifeea/2020/9627/0",
"title": "2020 7th International Forum on Electrical Engineering and Automation (IFEEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwMXnv0",
"title": "2014 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCctfc5",
"doi": "10.1109/VR.2014.6802057",
"title": "Design and evaluation of Binaural auditory rendering for CAVEs",
"normalizedTitle": "Design and evaluation of Binaural auditory rendering for CAVEs",
"abstract": "We describe an experiment whose goal is to investigate the usage of different audio rendering techniques delivered through headphones while walking inside a wide four-side CAVE environment. In our experiment, participants had to physically walked along a virtual path exposed to different auditory stimuli. Each subject was exposed to three conditions: Stereo, Binaural sound spatially congruent with visual and binaural sound spatially incongruent with visuals and had to rate subjectively each. The results of the experiment showed increased preference ratings for the binaural audio rendering, followed by stereo rendering. As expected incongruent spatial cues were ranked significantly lower. Binaural rendering can deliver an increased immersive experience and do no require specialized hardware.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We describe an experiment whose goal is to investigate the usage of different audio rendering techniques delivered through headphones while walking inside a wide four-side CAVE environment. In our experiment, participants had to physically walked along a virtual path exposed to different auditory stimuli. Each subject was exposed to three conditions: Stereo, Binaural sound spatially congruent with visual and binaural sound spatially incongruent with visuals and had to rate subjectively each. The results of the experiment showed increased preference ratings for the binaural audio rendering, followed by stereo rendering. As expected incongruent spatial cues were ranked significantly lower. Binaural rendering can deliver an increased immersive experience and do no require specialized hardware.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We describe an experiment whose goal is to investigate the usage of different audio rendering techniques delivered through headphones while walking inside a wide four-side CAVE environment. In our experiment, participants had to physically walked along a virtual path exposed to different auditory stimuli. Each subject was exposed to three conditions: Stereo, Binaural sound spatially congruent with visual and binaural sound spatially incongruent with visuals and had to rate subjectively each. The results of the experiment showed increased preference ratings for the binaural audio rendering, followed by stereo rendering. As expected incongruent spatial cues were ranked significantly lower. Binaural rendering can deliver an increased immersive experience and do no require specialized hardware.",
"fno": "06802057",
"keywords": [
"Rendering Computer Graphics",
"Visualization",
"Virtual Environments",
"Headphones",
"Music",
"I 3 7 Computer Graphics Three Dimensional Graphics And Realism Virtual Reality",
"H 5 1 Information Interfaces And Presentation Multimedia Information Systems Audio Input Output"
],
"authors": [
{
"affiliation": "Medialogy, Aalborg University Copenhagen",
"fullName": "F. Grani",
"givenName": "F.",
"surname": "Grani",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inria IRISA Rennes, France",
"fullName": "F. Argelaguet",
"givenName": "F.",
"surname": "Argelaguet",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inria IRISA Rennes, France",
"fullName": "V. Gouranton",
"givenName": "V.",
"surname": "Gouranton",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inria IRISA Rennes, France",
"fullName": "M. Badawi",
"givenName": "M.",
"surname": "Badawi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inria IRISA Rennes, France",
"fullName": "R. Gaugne",
"givenName": "R.",
"surname": "Gaugne",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Medialogy, Aalborg University Copenhagen",
"fullName": "S. Serafin",
"givenName": "S.",
"surname": "Serafin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inria IRISA Rennes, France",
"fullName": "A. Lecuyer",
"givenName": "A.",
"surname": "Lecuyer",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-03-01T00:00:00",
"pubType": "proceedings",
"pages": "73-74",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-2871-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06802056",
"articleId": "12OmNBqdrca",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06802058",
"articleId": "12OmNAObbAg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2007/1016/0/04285041",
"title": "Analysis and Synthesis of Binaural Parameters for Efficient 3D Audio Rendering in MPEG Surround",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2007/04285041/12OmNvnOwtE",
"parentPublication": {
"id": "proceedings/icme/2007/1016/0",
"title": "2007 International Conference on Multimedia & Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2002/02/v0099",
"title": "A Generic Rendering System",
"doi": null,
"abstractUrl": "/journal/tg/2002/02/v0099/13rRUwbs2gi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sive/2018/5713/0/08577131",
"title": "Binaural Rendering for Sound Navigation and Orientation",
"doi": null,
"abstractUrl": "/proceedings-article/sive/2018/08577131/17D45XoXP3v",
"parentPublication": {
"id": "proceedings/sive/2018/5713/0",
"title": "2018 IEEE 4th VR Workshop on Sonic Interactions for Virtual Environments (SIVE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2019/4540/0/08864578",
"title": "Mono-Stereoscopic Camera in a Virtual Reality Environment: Case Study in Cybersickness",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2019/08864578/1e5Zs94AhSE",
"parentPublication": {
"id": "proceedings/vs-games/2019/4540/0",
"title": "2019 11th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090518",
"title": "Implementing Continuous-Azimuth Binaural Sound in Unity 3D",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090518/1jIxq83yDGo",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800c472",
"title": "Inverse Rendering for Complex Indoor Scenes: Shape, Spatially-Varying Lighting and SVBRDF From a Single Image",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800c472/1m3o03C864M",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2020/9231/0/923100a398",
"title": "Rendering Optimizations for Virtual Reality Using Eye-Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2020/923100a398/1oZBBw6BBa8",
"parentPublication": {
"id": "proceedings/svr/2020/9231/0",
"title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccst/2020/8138/0/813800a133",
"title": "Performace Analysis and Optimization for Short-time Rendering Frames",
"doi": null,
"abstractUrl": "/proceedings-article/iccst/2020/813800a133/1p1goCfdXq0",
"parentPublication": {
"id": "proceedings/iccst/2020/8138/0",
"title": "2020 International Conference on Culture-oriented Science & Technology (ICCST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2020/0497/0/049700a316",
"title": "Cloud Rendering Scheme for Standalone Virtual Reality Headset",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2020/049700a316/1vg8ftWdDoY",
"parentPublication": {
"id": "proceedings/icvrv/2020/0497/0",
"title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a340",
"title": "Device-Agnostic Augmented Reality Rendering Pipeline for AR in Medicine",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a340/1yeQMJ5IoX6",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxV4itF",
"title": "2017 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNClQ0yz",
"doi": "10.1109/VR.2017.7892307",
"title": "Air cushion: A pilot study of the passive technique to mitigate simulator sickness by responding to vection",
"normalizedTitle": "Air cushion: A pilot study of the passive technique to mitigate simulator sickness by responding to vection",
"abstract": "Simulator sickness is an issue in virtual reality environments. In a virtual world, sensory conflict between visual sensation and self-motion perception occurs readily. Contradiction between visual and vestibular sensation is a dominant cause of motion sickness. Vection is a visually evoked illusion of self-motion. Vection occurs when a stationary human experiences locomotor stimulation over a wider area of the field of view, and senses motion when in fact there is none. Strong vection has been associated with simulator sickness. In this poster, the authors present results of a pilot study based on a hypothesis that simulator sickness can be mitigated by passively responding to the body sway. Commercially available air cushions were applied for VR environments. Measurable mitigation of simulator sickness was achieved by physically responding to vection. Allowing body sway encourages moderating the sensory conflict between visual sensation and self-motion perception. Also, the shapes of air cushions on seat backs were found to be an important variable.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Simulator sickness is an issue in virtual reality environments. In a virtual world, sensory conflict between visual sensation and self-motion perception occurs readily. Contradiction between visual and vestibular sensation is a dominant cause of motion sickness. Vection is a visually evoked illusion of self-motion. Vection occurs when a stationary human experiences locomotor stimulation over a wider area of the field of view, and senses motion when in fact there is none. Strong vection has been associated with simulator sickness. In this poster, the authors present results of a pilot study based on a hypothesis that simulator sickness can be mitigated by passively responding to the body sway. Commercially available air cushions were applied for VR environments. Measurable mitigation of simulator sickness was achieved by physically responding to vection. Allowing body sway encourages moderating the sensory conflict between visual sensation and self-motion perception. Also, the shapes of air cushions on seat backs were found to be an important variable.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Simulator sickness is an issue in virtual reality environments. In a virtual world, sensory conflict between visual sensation and self-motion perception occurs readily. Contradiction between visual and vestibular sensation is a dominant cause of motion sickness. Vection is a visually evoked illusion of self-motion. Vection occurs when a stationary human experiences locomotor stimulation over a wider area of the field of view, and senses motion when in fact there is none. Strong vection has been associated with simulator sickness. In this poster, the authors present results of a pilot study based on a hypothesis that simulator sickness can be mitigated by passively responding to the body sway. Commercially available air cushions were applied for VR environments. Measurable mitigation of simulator sickness was achieved by physically responding to vection. Allowing body sway encourages moderating the sensory conflict between visual sensation and self-motion perception. Also, the shapes of air cushions on seat backs were found to be an important variable.",
"fno": "07892307",
"keywords": [
"Games",
"Visualization",
"Fatigue",
"Stomach",
"Virtual Reality",
"Shape",
"Tactile Sensors",
"Simulator Sickness",
"Vection",
"Tactile Feedback",
"HMD"
],
"authors": [
{
"affiliation": "Tokyo Institute of Technology, Japan",
"fullName": "Yoshikazu Onuki",
"givenName": "Yoshikazu",
"surname": "Onuki",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tokyo Institute of Technology, Japan",
"fullName": "Shunsuke Ono",
"givenName": "Shunsuke",
"surname": "Ono",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tokyo Institute of Technology, Japan",
"fullName": "Itsuo Kumazawa",
"givenName": "Itsuo",
"surname": "Kumazawa",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-01-01T00:00:00",
"pubType": "proceedings",
"pages": "323-324",
"year": "2017",
"issn": "2375-5334",
"isbn": "978-1-5090-6647-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07892306",
"articleId": "12OmNvkpl8n",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07892308",
"articleId": "12OmNvA1hFe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2005/8929/0/01492799",
"title": "Virtual acceleration with galvanic vestibular stimulation in a virtual reality environment",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2005/01492799/12OmNwJPMZr",
"parentPublication": {
"id": "proceedings/vr/2005/8929/0",
"title": "IEEE Virtual Reality 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892284",
"title": "Diminished reality for acceleration stimulus: Motion sickness reduction with vection for autonomous driving",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892284/12OmNwx3QdZ",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2005/8929/0/01492765",
"title": "Towards lean and elegant self-motion simulation in virtual reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2005/01492765/12OmNxWcHjT",
"parentPublication": {
"id": "proceedings/vr/2005/8929/0",
"title": "IEEE Virtual Reality 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836521",
"title": "Diminished Reality for Acceleration — Motion Sickness Reduction with Vection for Autonomous Driving",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836521/12OmNyo1nR0",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446345",
"title": "Investigating a Sparse Peripheral Display in a Head-Mounted Display for VR Locomotion",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446345/13bd1fZBGbI",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08267239",
"title": "Towards a Machine-Learning Approach for Sickness Prediction in 360° Stereoscopic Videos",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08267239/13rRUyYSWt3",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2018/9269/0/926900a153",
"title": "Machine Learning Architectures to Predict Motion Sickness Using a Virtual Reality Rollercoaster Simulation Tool",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2018/926900a153/17D45W2Wyyv",
"parentPublication": {
"id": "proceedings/aivr/2018/9269/0",
"title": "2018 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a814",
"title": "Reverse 3D Sound Flow Can Decrease VR Sickness?",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a814/1J7WjyIbnrO",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798297",
"title": "Unifying Research to Address Motion Sickness",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798297/1cJ13JSUePK",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a380",
"title": "Evaluating VR Sickness in VR Locomotion Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a380/1tnXc1raaxq",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1pystLSz19C",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1pysvxeFG4E",
"doi": "10.1109/ISMAR50242.2020.00092",
"title": "Visual-Auditory Redirection: Multimodal Integration of Incongruent Visual and Auditory Cues for Redirected Walking",
"normalizedTitle": "Visual-Auditory Redirection: Multimodal Integration of Incongruent Visual and Auditory Cues for Redirected Walking",
"abstract": "In this paper, we present a study of redirected walking (RDW) that shifts the positional relationship between visual and auditory cues during curvature manipulation. It has been shown that, when presented with incongruent visual and auditory spatial cues during a localization task, human observers integrate that information based on each cue's relative reliability, which determines their final perception of the target object's location. This multi-modal integration model is known as maximum likelihood estimation (MLE). By altering the visual location of objects that users perceive in virtual reality (VR) through auditory cues during redirection manipulation, we expect fewer users to notice the manipulation, which helps increase the usable curvature gain. Most existing studies on MLE in multi-modal integration have used random-dot stereograms as visual cues under stable motion states. In the present study, we first investigated whether this model holds while walking in VR environment. Our results indicate that in a walking state, users' perceptions of the target object's location shift toward auditory cue as the reliability of vision decreases, in keeping with the trend shown in previous studies on MLE. Based on this result, we then investigated the detection threshold of curvature gains during redirection manipulation under a condition with congruent visual-auditory cues as well as a condition in which users' location perceptions of the target object are considered to be affected by the incongruent auditory cue. We found that the detection threshold of curvature gains was higher with incongruent visual-auditory cues than with congruent cues. These results show that incongruent multimodal cues in VR may have a promising application in the area of redirected walking.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we present a study of redirected walking (RDW) that shifts the positional relationship between visual and auditory cues during curvature manipulation. It has been shown that, when presented with incongruent visual and auditory spatial cues during a localization task, human observers integrate that information based on each cue's relative reliability, which determines their final perception of the target object's location. This multi-modal integration model is known as maximum likelihood estimation (MLE). By altering the visual location of objects that users perceive in virtual reality (VR) through auditory cues during redirection manipulation, we expect fewer users to notice the manipulation, which helps increase the usable curvature gain. Most existing studies on MLE in multi-modal integration have used random-dot stereograms as visual cues under stable motion states. In the present study, we first investigated whether this model holds while walking in VR environment. Our results indicate that in a walking state, users' perceptions of the target object's location shift toward auditory cue as the reliability of vision decreases, in keeping with the trend shown in previous studies on MLE. Based on this result, we then investigated the detection threshold of curvature gains during redirection manipulation under a condition with congruent visual-auditory cues as well as a condition in which users' location perceptions of the target object are considered to be affected by the incongruent auditory cue. We found that the detection threshold of curvature gains was higher with incongruent visual-auditory cues than with congruent cues. These results show that incongruent multimodal cues in VR may have a promising application in the area of redirected walking.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we present a study of redirected walking (RDW) that shifts the positional relationship between visual and auditory cues during curvature manipulation. It has been shown that, when presented with incongruent visual and auditory spatial cues during a localization task, human observers integrate that information based on each cue's relative reliability, which determines their final perception of the target object's location. This multi-modal integration model is known as maximum likelihood estimation (MLE). By altering the visual location of objects that users perceive in virtual reality (VR) through auditory cues during redirection manipulation, we expect fewer users to notice the manipulation, which helps increase the usable curvature gain. Most existing studies on MLE in multi-modal integration have used random-dot stereograms as visual cues under stable motion states. In the present study, we first investigated whether this model holds while walking in VR environment. Our results indicate that in a walking state, users' perceptions of the target object's location shift toward auditory cue as the reliability of vision decreases, in keeping with the trend shown in previous studies on MLE. Based on this result, we then investigated the detection threshold of curvature gains during redirection manipulation under a condition with congruent visual-auditory cues as well as a condition in which users' location perceptions of the target object are considered to be affected by the incongruent auditory cue. We found that the detection threshold of curvature gains was higher with incongruent visual-auditory cues than with congruent cues. These results show that incongruent multimodal cues in VR may have a promising application in the area of redirected walking.",
"fno": "850800a639",
"keywords": [
"Maximum Likelihood Estimation",
"Virtual Reality",
"Vision",
"Visual Perception",
"Visual Auditory Redirection",
"Incongruent Visual",
"Redirected Walking",
"Auditory Spatial Cues",
"Multimodal Integration Model",
"MLE",
"Redirection Manipulation",
"Visual Cues",
"Curvature Gains",
"Visual Auditory Cues",
"Incongruent Auditory Cue",
"Incongruent Multimodal Cues",
"Legged Locomotion",
"Visualization",
"Maximum Likelihood Estimation",
"Solid Modeling",
"Observers",
"Reliability",
"Task Analysis",
"Human Centered Computing Visualization Visualization Techniques Treemaps",
"Human Centered Computing Visualization Visualization Design And Evaluation Methods"
],
"authors": [
{
"affiliation": "The University of Tokyo",
"fullName": "Peizhong Gao",
"givenName": "Peizhong",
"surname": "Gao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Tokyo JSPS",
"fullName": "Keigo Matsumoto",
"givenName": "Keigo",
"surname": "Matsumoto",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Tokyo JSPS",
"fullName": "Takuji Narumi",
"givenName": "Takuji",
"surname": "Narumi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Tokyo JST PRESTO",
"fullName": "Michitaka Hirose",
"givenName": "Michitaka",
"surname": "Hirose",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "639-648",
"year": "2020",
"issn": "1554-7868",
"isbn": "978-1-7281-8508-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "850800a627",
"articleId": "1pysyecdlzq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "850800a649",
"articleId": "1pysvKFdazS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892279",
"title": "Curvature gains in redirected walking: A closer look",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892279/12OmNBEGYJE",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2014/2871/0/06802057",
"title": "Design and evaluation of Binaural auditory rendering for CAVEs",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802057/12OmNCctfc5",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2016/0842/0/07460038",
"title": "Curvature manipulation techniques in redirection using haptic cues",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460038/12OmNxTVU2T",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504745",
"title": "Acoustic redirected walking with auditory cues by means of wave field synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504745/12OmNxYtu4K",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549412",
"title": "Estimation of detection thresholds for acoustic based redirected walking techniques",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549412/12OmNz2C1yn",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446062",
"title": "Biomechanical Parameters Under Curvature Gains and Bending Gains in Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446062/13bd1fKQxrR",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2015/01/mmu2015010024",
"title": "The Effects of Ecological Auditory Feedback on Rhythmic Walking Interaction",
"doi": null,
"abstractUrl": "/magazine/mu/2015/01/mmu2015010024/13rRUIJcWtD",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08645699",
"title": "Shrinking Circles: Adaptation to Increased Curvature Gain in Redirected Walking",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08645699/17PYElBjW00",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a524",
"title": "The Chaotic Behavior of Redirection – Revisiting Simulations in Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a524/1CJc4FECUko",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798286",
"title": "Evaluating the Effectiveness of Redirected Walking with Auditory Distractors for Navigation in Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798286/1cJ0PIoIPV6",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
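The MLE cue-integration model invoked in the record above has a standard closed form: each cue is weighted by its reliability, the inverse of its variance. Below is a minimal sketch of that computation; the positions and variances are illustrative assumptions, not values from the paper.

```python
# A minimal sketch of maximum-likelihood cue integration as described in the
# abstract above: each cue is weighted by its reliability, i.e. the inverse of
# its variance. All positions and variances are made-up illustration values.

def mle_fuse(pos_v, var_v, pos_a, var_a):
    """Fuse a visual and an auditory location estimate into the ML estimate."""
    w_v = 1.0 / var_v                      # reliability of the visual cue
    w_a = 1.0 / var_a                      # reliability of the auditory cue
    fused = (w_v * pos_v + w_a * pos_a) / (w_v + w_a)
    fused_var = 1.0 / (w_v + w_a)          # fused estimate beats either cue alone
    return fused, fused_var

# As visual reliability drops (larger variance), the fused percept shifts
# toward the auditory cue -- the trend the study reports for walking users.
print(mle_fuse(pos_v=0.0, var_v=1.0, pos_a=10.0, var_a=4.0))   # stays near vision
print(mle_fuse(pos_v=0.0, var_v=9.0, pos_a=10.0, var_a=4.0))   # shifts toward audio
```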
{
"proceeding": {
"id": "12OmNxV4itF",
"title": "2017 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyv7mbV",
"doi": "10.1109/VR.2017.7892319",
"title": "Adaptive 360-degree video streaming using layered video coding",
"normalizedTitle": "Adaptive 360-degree video streaming using layered video coding",
"abstract": "Virtual reality and 360-degree video streaming are growing rapidly; however, streaming 360-degree video is very challenging due to high bandwidth requirements. To address this problem, the video quality is adjusted according to the user viewport prediction. High quality video is only streamed for the user viewport, reducing the overall bandwidth consumption. Existing solutions use shallow buffers limited by the accuracy of viewport prediction. Therefore, playback is prone to video freezes which are very destructive for the Quality of Experience (QoE). We propose using layered encoding for 360-degree video to improve QoE by reducing the probability of video freezes and the latency of response to the user head movements. Moreover, this scheme reduces the storage requirements significantly and improves in-network cache performance.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual reality and 360-degree video streaming are growing rapidly; however, streaming 360-degree video is very challenging due to high bandwidth requirements. To address this problem, the video quality is adjusted according to the user viewport prediction. High quality video is only streamed for the user viewport, reducing the overall bandwidth consumption. Existing solutions use shallow buffers limited by the accuracy of viewport prediction. Therefore, playback is prone to video freezes which are very destructive for the Quality of Experience (QoE). We propose using layered encoding for 360-degree video to improve QoE by reducing the probability of video freezes and the latency of response to the user head movements. Moreover, this scheme reduces the storage requirements significantly and improves in-network cache performance.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual reality and 360-degree video streaming are growing rapidly; however, streaming 360-degree video is very challenging due to high bandwidth requirements. To address this problem, the video quality is adjusted according to the user viewport prediction. High quality video is only streamed for the user viewport, reducing the overall bandwidth consumption. Existing solutions use shallow buffers limited by the accuracy of viewport prediction. Therefore, playback is prone to video freezes which are very destructive for the Quality of Experience (QoE). We propose using layered encoding for 360-degree video to improve QoE by reducing the probability of video freezes and the latency of response to the user head movements. Moreover, this scheme reduces the storage requirements significantly and improves in-network cache performance.",
"fno": "07892319",
"keywords": [
"Cache Storage",
"Probability",
"Quality Of Experience",
"Video Coding",
"Video Streaming",
"Virtual Reality",
"Adaptive 360 Degree Video Streaming",
"Bandwidth Requirements",
"Video Quality",
"User Viewport Prediction",
"Bandwidth Consumption",
"Shallow Buffers",
"Quality Of Experience",
"Layered Video Encoding",
"Qo E",
"Video Freeze Probability",
"User Head Movements",
"Storage Requirements",
"In Network Cache Performance",
"Streaming Media",
"Bandwidth",
"Encoding",
"Static V Ar Compensators",
"Virtual Reality",
"Video Coding",
"Adaptive 360 Video Streaming",
"SVC",
"Video Freeze"
],
"authors": [
{
"affiliation": "The University of Texas at Dallas, Texas, U.S.A.",
"fullName": "Afshin TaghaviNasrabadi",
"givenName": "Afshin",
"surname": "TaghaviNasrabadi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Texas at Dallas, Texas, U.S.A.",
"fullName": "Anahita Mahzari",
"givenName": "Anahita",
"surname": "Mahzari",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Texas at Dallas, Texas, U.S.A.",
"fullName": "Joseph D. Beshay",
"givenName": "Joseph D.",
"surname": "Beshay",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Texas at Dallas, Texas, U.S.A.",
"fullName": "Ravi Prakash",
"givenName": "Ravi",
"surname": "Prakash",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-01-01T00:00:00",
"pubType": "proceedings",
"pages": "347-348",
"year": "2017",
"issn": "2375-5334",
"isbn": "978-1-5090-6647-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07892318",
"articleId": "12OmNBQ2W0V",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07892320",
"articleId": "12OmNxUdv7D",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/lcn/2018/4413/0/08638092",
"title": "Plato: Learning-based Adaptive Streaming of 360-Degree Videos",
"doi": null,
"abstractUrl": "/proceedings-article/lcn/2018/08638092/18rqIpj1b3i",
"parentPublication": {
"id": "proceedings/lcn/2018/4413/0",
"title": "2018 IEEE 43rd Conference on Local Computer Networks (LCN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/msn/2021/0668/0/066800a462",
"title": "Soft Actor-Critic Algorithm for 360-Degree Video Streaming with Long-Term Viewport Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/msn/2021/066800a462/1CxzyBvoVva",
"parentPublication": {
"id": "proceedings/msn/2021/0668/0",
"title": "2021 17th International Conference on Mobility, Sensing and Networking (MSN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859789",
"title": "MFVP: Mobile-Friendly Viewport Prediction for Live 360-Degree Video Streaming",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859789/1G9EA5cTE88",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2019/9214/0/921400a324",
"title": "VAS360: QoE-Driven Viewport Adaptive Streaming for 360 Video",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2019/921400a324/1cJ0BSNq6FW",
"parentPublication": {
"id": "proceedings/icmew/2019/9214/0",
"title": "2019 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ifip-networking/2019/16/0/08999460",
"title": "Advancing user quality of experience in 360-degree video streaming",
"doi": null,
"abstractUrl": "/proceedings-article/ifip-networking/2019/08999460/1hHLyJf1thC",
"parentPublication": {
"id": "proceedings/ifip-networking/2019/16/0",
"title": "2019 IFIP Networking Conference (IFIP Networking)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icoin/2020/4199/0/09016449",
"title": "Buffer Based Adaptation Using Scalable Video Coding for 360-Degree Video Streaming over NDN",
"doi": null,
"abstractUrl": "/proceedings-article/icoin/2020/09016449/1hQqTVygVbO",
"parentPublication": {
"id": "proceedings/icoin/2020/4199/0",
"title": "2020 International Conference on Information Networking (ICOIN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2021/07/09024132",
"title": "EPASS360: QoE-Aware 360-Degree Video Streaming Over Mobile Devices",
"doi": null,
"abstractUrl": "/journal/tm/2021/07/09024132/1hVmzVfm7sY",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2021/04/09247254",
"title": "Enhancing QoE for Viewport-Adaptive 360-Degree Video Streaming: Perception Analysis and Implementation",
"doi": null,
"abstractUrl": "/magazine/mu/2021/04/09247254/1osls1hp8hq",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2020/8697/0/869700a085",
"title": "On Subpicture-based Viewport-dependent 360-degree Video Streaming using VVC",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2020/869700a085/1qBbHaCz3vG",
"parentPublication": {
"id": "proceedings/ism/2020/8697/0",
"title": "2020 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icoin/2021/9101/0/09333964",
"title": "Implementing Viewport Tile Extractor for Viewport-Adaptive 360-Degree Video Tiled Streaming",
"doi": null,
"abstractUrl": "/proceedings-article/icoin/2021/09333964/1qTrL1nfEyc",
"parentPublication": {
"id": "proceedings/icoin/2021/9101/0",
"title": "2021 International Conference on Information Networking (ICOIN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
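The layered-coding argument in the record above turns on buffer depth: a full-sphere base layer can be buffered deep enough to survive throughput dips, while viewport-dependent enhancement layers must stay in a shallow buffer bounded by viewport-prediction accuracy. A hedged sketch of that download-scheduling idea follows; the constants and names are hypothetical, not the authors' implementation.

```python
# Illustrative sketch (not the paper's actual algorithm) of why layered coding
# reduces freezes: the full-sphere base layer is buffered deep, while
# viewport-dependent enhancement segments use a shallow buffer that tracks
# recent viewport predictions. All constants are assumptions for illustration.

BASE_TARGET_SECS = 20.0     # deep buffer: base layer survives throughput dips
ENH_TARGET_SECS = 2.0       # shallow buffer: bounded by prediction accuracy

def schedule(base_buffered: float, enh_buffered: float, predicted_tiles: list):
    """Decide which segments to request next, given current buffer levels (s)."""
    requests = []
    if base_buffered < BASE_TARGET_SECS:
        requests.append(("base", "full-sphere"))          # always safe to prefetch
    if enh_buffered < ENH_TARGET_SECS:
        requests.append(("enhancement", predicted_tiles)) # refine visible tiles only
    return requests

# Even when the viewport prediction misses, the deep base-layer buffer still
# holds decodable (lower-quality) video, so playback degrades instead of freezing.
print(schedule(base_buffered=12.0, enh_buffered=0.5, predicted_tiles=["t3", "t4"]))
```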
{
"proceeding": {
"id": "1H5KlMT4sBa",
"title": "2022 13th International Conference on Information, Intelligence, Systems & Applications (IISA)",
"acronym": "iisa",
"groupId": "1802852",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1H5KpY37ODe",
"doi": "10.1109/IISA56318.2022.9904420",
"title": "Subtitle-based Viewport Prediction for 360-degree Virtual Tourism Video",
"normalizedTitle": "Subtitle-based Viewport Prediction for 360-degree Virtual Tourism Video",
"abstract": "360-degree streaming videos can provide a rich immersive experiences to the users. However, it requires an extremely high bandwidth network. One of the common solutions for saving bandwidth consumption is to stream only a portion of video covered by the user’s viewport. To do that, the user’s viewpoint prediction is indispensable. In existing viewport prediction methods, they mainly concentrate on the user’s head movement trajectory and video saliency. None of them consider navigation information contained in the video, which can turn the attention of the user to specific regions in the video with high probability. Such information can be included in video subtitles, especially the one in 360-degree virtual tourism videos. This fact reveals the potential contribution of video subtitles to viewport prediction. Therefore, in this paper, a subtitle-based viewport prediction model for 360-degree virtual tourism videos is proposed. This model leverages the navigation information in the video subtitles in addition to head movement trajectory and video saliency, to improve the prediction accuracy. The experimental results demonstrate that the proposed model outperforms baseline methods which only use head movement trajectory and video saliency for viewport prediction.",
"abstracts": [
{
"abstractType": "Regular",
"content": "360-degree streaming videos can provide a rich immersive experiences to the users. However, it requires an extremely high bandwidth network. One of the common solutions for saving bandwidth consumption is to stream only a portion of video covered by the user’s viewport. To do that, the user’s viewpoint prediction is indispensable. In existing viewport prediction methods, they mainly concentrate on the user’s head movement trajectory and video saliency. None of them consider navigation information contained in the video, which can turn the attention of the user to specific regions in the video with high probability. Such information can be included in video subtitles, especially the one in 360-degree virtual tourism videos. This fact reveals the potential contribution of video subtitles to viewport prediction. Therefore, in this paper, a subtitle-based viewport prediction model for 360-degree virtual tourism videos is proposed. This model leverages the navigation information in the video subtitles in addition to head movement trajectory and video saliency, to improve the prediction accuracy. The experimental results demonstrate that the proposed model outperforms baseline methods which only use head movement trajectory and video saliency for viewport prediction.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "360-degree streaming videos can provide a rich immersive experiences to the users. However, it requires an extremely high bandwidth network. One of the common solutions for saving bandwidth consumption is to stream only a portion of video covered by the user’s viewport. To do that, the user’s viewpoint prediction is indispensable. In existing viewport prediction methods, they mainly concentrate on the user’s head movement trajectory and video saliency. None of them consider navigation information contained in the video, which can turn the attention of the user to specific regions in the video with high probability. Such information can be included in video subtitles, especially the one in 360-degree virtual tourism videos. This fact reveals the potential contribution of video subtitles to viewport prediction. Therefore, in this paper, a subtitle-based viewport prediction model for 360-degree virtual tourism videos is proposed. This model leverages the navigation information in the video subtitles in addition to head movement trajectory and video saliency, to improve the prediction accuracy. The experimental results demonstrate that the proposed model outperforms baseline methods which only use head movement trajectory and video saliency for viewport prediction.",
"fno": "09904420",
"keywords": [
"Probability",
"Travel Industry",
"Video Signal Processing",
"Video Streaming",
"360 Degree Virtual Tourism Video",
"Video Subtitles",
"Head Movement Trajectory",
"360 Degree Streaming Videos",
"Extremely High Bandwidth Network",
"Viewport Prediction Methods",
"Video Saliency",
"Subtitle Based Viewport Prediction Model",
"Navigation",
"Bandwidth",
"Predictive Models",
"Streaming Media",
"Trajectory",
"Videos",
"360 Degree Video",
"Viewport Prediction",
"Virtual Tourism Videos",
"Video Subtitles",
"Virtual Reality"
],
"authors": [
{
"affiliation": "Shibaura Institute of Technology,Graduate School of Engineering and Science,Tokyo,Japan",
"fullName": "Chuanzhe Jing",
"givenName": "Chuanzhe",
"surname": "Jing",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shibaura Institute of Technology,Graduate School of Engineering and Science,Tokyo,Japan",
"fullName": "Tho Nguyen Duc",
"givenName": "Tho Nguyen",
"surname": "Duc",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shibaura Institute of Technology,Graduate School of Engineering and Science,Tokyo,Japan",
"fullName": "Phan Xuan Tan",
"givenName": "Phan Xuan",
"surname": "Tan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shibaura Institute of Technology,Graduate School of Engineering and Science,Tokyo,Japan",
"fullName": "Eiji Kamioka",
"givenName": "Eiji",
"surname": "Kamioka",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iisa",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-8",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6390-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09904380",
"articleId": "1H5KyBcFYQw",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09904373",
"articleId": "1H5KqsQ2en6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ism/2017/2937/0/2937a038",
"title": "A New Adaptation Approach for Viewport-adaptive 360-degree Video Streaming",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2017/2937a038/12OmNwwd2MD",
"parentPublication": {
"id": "proceedings/ism/2017/2937/0",
"title": "2017 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892319",
"title": "Adaptive 360-degree video streaming using layered video coding",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892319/12OmNyv7mbV",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2018/9269/0/926900a157",
"title": "Trajectory-Based Viewport Prediction for 360-Degree Virtual Reality Videos",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2018/926900a157/17D45WZZ7Fb",
"parentPublication": {
"id": "proceedings/aivr/2018/9269/0",
"title": "2018 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/lcn/2018/4413/0/08638092",
"title": "Plato: Learning-based Adaptive Streaming of 360-Degree Videos",
"doi": null,
"abstractUrl": "/proceedings-article/lcn/2018/08638092/18rqIpj1b3i",
"parentPublication": {
"id": "proceedings/lcn/2018/4413/0",
"title": "2018 IEEE 43rd Conference on Local Computer Networks (LCN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/msn/2021/0668/0/066800a462",
"title": "Soft Actor-Critic Algorithm for 360-Degree Video Streaming with Long-Term Viewport Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/msn/2021/066800a462/1CxzyBvoVva",
"parentPublication": {
"id": "proceedings/msn/2021/0668/0",
"title": "2021 17th International Conference on Mobility, Sensing and Networking (MSN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859789",
"title": "MFVP: Mobile-Friendly Viewport Prediction for Live 360-Degree Video Streaming",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859789/1G9EA5cTE88",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2022/9548/0/954800a274",
"title": "Rate-Adaptive Streaming of 360-Degree Videos with Head-Motion-Aware Viewport Margins",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2022/954800a274/1GvddU2H1Pq",
"parentPublication": {
"id": "proceedings/mipr/2022/9548/0",
"title": "2022 IEEE 5th International Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2021/07/09024132",
"title": "EPASS360: QoE-Aware 360-Degree Video Streaming Over Mobile Devices",
"doi": null,
"abstractUrl": "/journal/tm/2021/07/09024132/1hVmzVfm7sY",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2020/8697/0/869700a082",
"title": "Redefine the A in ABR for 360-degree Videos: A Flexible ABR Framework",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2020/869700a082/1qBbIEON8UU",
"parentPublication": {
"id": "proceedings/ism/2020/8697/0",
"title": "2020 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icoin/2021/9101/0/09333964",
"title": "Implementing Viewport Tile Extractor for Viewport-Adaptive 360-Degree Video Tiled Streaming",
"doi": null,
"abstractUrl": "/proceedings-article/icoin/2021/09333964/1qTrL1nfEyc",
"parentPublication": {
"id": "proceedings/icoin/2021/9101/0",
"title": "2021 International Conference on Information Networking (ICOIN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
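The subtitle-based predictor described in the record above fuses three signals: extrapolated head trajectory, video saliency, and navigation hints parsed from the current subtitle (e.g. "on your left is the cathedral"). A minimal sketch of such a fusion step, assuming hypothetical weights and a simple linear blend; the paper's actual model is not specified here.

```python
# A minimal sketch, under assumptions, of fusing three viewport cues: head
# trajectory, saliency, and a subtitle-derived direction. Weights and helper
# names are hypothetical, not taken from the paper. This naive linear blend
# of yaw angles also ignores 360-degree wraparound, which a real predictor
# would have to handle.

def predict_viewport(traj_yaw, saliency_yaw, subtitle_yaw, w=(0.5, 0.3, 0.2)):
    """Blend up to three yaw estimates (degrees) into one predicted angle."""
    cues, weights = [traj_yaw, saliency_yaw], [w[0], w[1]]
    if subtitle_yaw is not None:            # subtitle carries navigation info
        cues.append(subtitle_yaw)
        weights.append(w[2])
    return sum(c * wt for c, wt in zip(cues, weights)) / sum(weights)

# With a subtitle pointing at 90 degrees (viewer's left), the prediction is
# pulled toward that region; without it, only trajectory and saliency matter.
print(predict_viewport(traj_yaw=10.0, saliency_yaw=30.0, subtitle_yaw=90.0))  # 32.0
print(predict_viewport(traj_yaw=10.0, saliency_yaw=30.0, subtitle_yaw=None))  # 17.5
```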
{
"proceeding": {
"id": "1HBK0AfaGKk",
"title": "2022 IEEE Symposium on Computers and Communications (ISCC)",
"acronym": "iscc",
"groupId": "1000156",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1HBKgQaMnVS",
"doi": "10.1109/ISCC55528.2022.9912995",
"title": "LL-VAS: Adaptation Method for Low-Latency 360-degree Video Streaming over Mobile Networks",
"normalizedTitle": "LL-VAS: Adaptation Method for Low-Latency 360-degree Video Streaming over Mobile Networks",
"abstract": "With the ability to provide an “immersive experience”, 360-degree video-based applications are becoming more and more popular nowadays. In this paper, we propose LL-VAS, a novel adaptation method for low-latency 360-degree video streaming over mobile networks. By applying tile-based streaming, the proposed method allows 360-degree video streaming over resource-constrained mobile networks. In addition, by actively monitoring network throughput at the tile level, the proposed method can detect reductions in network throughput, and adapt video content in a timely manner to avoid re-buffering. Trace-driven experiments show that the proposed method can significantly decrease the number of re-buffering and re-buffering time under strong network throughput fluctuations and small buffer size when compared to reference methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "With the ability to provide an “immersive experience”, 360-degree video-based applications are becoming more and more popular nowadays. In this paper, we propose LL-VAS, a novel adaptation method for low-latency 360-degree video streaming over mobile networks. By applying tile-based streaming, the proposed method allows 360-degree video streaming over resource-constrained mobile networks. In addition, by actively monitoring network throughput at the tile level, the proposed method can detect reductions in network throughput, and adapt video content in a timely manner to avoid re-buffering. Trace-driven experiments show that the proposed method can significantly decrease the number of re-buffering and re-buffering time under strong network throughput fluctuations and small buffer size when compared to reference methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "With the ability to provide an “immersive experience”, 360-degree video-based applications are becoming more and more popular nowadays. In this paper, we propose LL-VAS, a novel adaptation method for low-latency 360-degree video streaming over mobile networks. By applying tile-based streaming, the proposed method allows 360-degree video streaming over resource-constrained mobile networks. In addition, by actively monitoring network throughput at the tile level, the proposed method can detect reductions in network throughput, and adapt video content in a timely manner to avoid re-buffering. Trace-driven experiments show that the proposed method can significantly decrease the number of re-buffering and re-buffering time under strong network throughput fluctuations and small buffer size when compared to reference methods.",
"fno": "09912995",
"keywords": [
"Computers",
"Fluctuations",
"Immersive Experience",
"Streaming Media",
"Throughput",
"Quality Assessment",
"Quality Of Experience",
"360 Degree Video",
"Low Latency Streaming",
"Mobile Networks"
],
"authors": [
{
"affiliation": "Tohoku Institute of Technology,Sendai,Japan",
"fullName": "Duc Nguyen",
"givenName": "Duc",
"surname": "Nguyen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hanoi University of Science and Technology,Hanoi,Vietnam",
"fullName": "Le Ngan",
"givenName": "Le",
"surname": "Ngan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hanoi University of Science and Technology,Hanoi,Vietnam",
"fullName": "Lai Huyen Thuong",
"givenName": "Lai Huyen",
"surname": "Thuong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hanoi University of Science and Technology,Hanoi,Vietnam",
"fullName": "Truong Thu Huong",
"givenName": "Truong Thu",
"surname": "Huong",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iscc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-06-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-9792-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09912852",
"articleId": "1HBKdDYNd4Y",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09913030",
"articleId": "1HBKlLeaPOE",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/lcn/2018/4413/0/08638092",
"title": "Plato: Learning-based Adaptive Streaming of 360-Degree Videos",
"doi": null,
"abstractUrl": "/proceedings-article/lcn/2018/08638092/18rqIpj1b3i",
"parentPublication": {
"id": "proceedings/lcn/2018/4413/0",
"title": "2018 IEEE 43rd Conference on Local Computer Networks (LCN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/5555/01/09842378",
"title": "Muster: Multi-source Streaming for Tile-based 360° Videos within Cloud Native 5G Networks",
"doi": null,
"abstractUrl": "/journal/tm/5555/01/09842378/1FlM107xCMw",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859997",
"title": "TS360: A Two-Stage Deep Reinforcement Learning System for 360-Degree Video Streaming",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859997/1G9E35Tn6RW",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/5555/01/10011565",
"title": "Robust Saliency-Driven Quality Adaptation for Mobile 360-Degree Video Streaming",
"doi": null,
"abstractUrl": "/journal/tm/5555/01/10011565/1JNmCJK5FYY",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/5555/01/10004009",
"title": "Macrotile: Toward QoE-Aware and Energy-Efficient 360-Degree Video Streaming",
"doi": null,
"abstractUrl": "/journal/tm/5555/01/10004009/1JwLoRnPwAg",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2019/9552/0/955200a290",
"title": "360SRL: A Sequential Reinforcement Learning Approach for ABR Tile-Based 360 Video Streaming",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2019/955200a290/1cdOIGM7HjO",
"parentPublication": {
"id": "proceedings/icme/2019/9552/0",
"title": "2019 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ifip-networking/2019/16/0/08999460",
"title": "Advancing user quality of experience in 360-degree video streaming",
"doi": null,
"abstractUrl": "/proceedings-article/ifip-networking/2019/08999460/1hHLyJf1thC",
"parentPublication": {
"id": "proceedings/ifip-networking/2019/16/0",
"title": "2019 IFIP Networking Conference (IFIP Networking)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ucc/2020/2394/0/239400a402",
"title": "A 360° Video Adaptive Streaming Scheme Based on Multiple Video Qualities",
"doi": null,
"abstractUrl": "/proceedings-article/ucc/2020/239400a402/1pZ0ZIjk5vq",
"parentPublication": {
"id": "proceedings/ucc/2020/2394/0",
"title": "2020 IEEE/ACM 13th International Conference on Utility and Cloud Computing (UCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2020/8697/0/869700a057",
"title": "SEAWARE: Semantic Aware View Prediction System for 360-degree Video Streaming",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2020/869700a057/1qBbGudwiM8",
"parentPublication": {
"id": "proceedings/ism/2020/8697/0",
"title": "2020 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2022/10/09351629",
"title": "A Hybrid Control Scheme for 360-Degree Dynamic Adaptive Video Streaming Over Mobile Devices",
"doi": null,
"abstractUrl": "/journal/tm/2022/10/09351629/1r50nXXcRuU",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
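The LL-VAS record above describes tile-level throughput monitoring that triggers early quality down-switching so a small buffer never drains. A hedged sketch of such an adaptation rule follows; the bitrate ladder, thresholds, and safety margins are assumptions for illustration, not the paper's actual algorithm.

```python
# Hedged sketch of the adaptation loop described in the abstract above:
# measure throughput per downloaded tile, aggregate it, and lower the
# requested quality before the (small) buffer drains. All constants are
# illustrative assumptions, not values from the paper.

QUALITIES_KBPS = [20000, 10000, 5000, 2500]   # bitrate ladder, high to low

def pick_quality(tile_throughputs_kbps, buffer_secs, safety=0.8, min_buffer=1.0):
    """Choose the highest bitrate the measured per-tile throughput can sustain."""
    measured = sum(tile_throughputs_kbps)      # aggregate of recent tile downloads
    budget = measured * safety                 # headroom against estimation error
    if buffer_secs < min_buffer:               # imminent re-buffering: back off hard
        budget *= 0.5
    for q in QUALITIES_KBPS:
        if q <= budget:
            return q
    return QUALITIES_KBPS[-1]                  # floor: lowest quality, keep playing

# A throughput dip combined with a nearly empty buffer forces the lowest rung,
# trading quality for uninterrupted playback.
print(pick_quality([3000, 3000, 2500], buffer_secs=0.8))   # -> 2500
```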
{
"proceeding": {
"id": "1J7W6LmbCw0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "9973799",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1J7Wsluon4I",
"doi": "10.1109/ISMAR-Adjunct57072.2022.00045",
"title": "RA360SR: A Real-time Acceleration-adaptive 360-degree Video Super-resolution System",
"normalizedTitle": "RA360SR: A Real-time Acceleration-adaptive 360-degree Video Super-resolution System",
"abstract": "With the development of virtual reality headsets, panoramic videos have started to gain increased popularity, and 360-degree video streaming technologies and devices have achieved significant success in the market. However, the video resolution provided by existing mainstream panoramic video cameras is not high enough to give users a viewing experience close to that of a conventional display, and the network bandwidth is another bottleneck for high-resolution 360-degree video streaming. Researchers have introduced numerous approaches to resolve these issues, including viewport prediction and regional super-resolution. Nevertheless, existing methods can-not ensure that users always have high-resolution viewport content because of the inevitable prediction error and the heavy server-side pre-process consumption. In this paper, we present RA360SR, a real-time acceleration-adaptive 360-degree video super-resolution system. We develop a dual-camera system with Unity3D post-processing to implement the real-time super-resolution model processing. Additionally, to obtain a more stable frame rate, we propose an acceleration-adaptive approach to switch the super-resolution model processing status based on the acceleration of users' head movements. Our results show that RA360SR can deliver a sharper video viewing experience for users while providing an acceptable frame rate.",
"abstracts": [
{
"abstractType": "Regular",
"content": "With the development of virtual reality headsets, panoramic videos have started to gain increased popularity, and 360-degree video streaming technologies and devices have achieved significant success in the market. However, the video resolution provided by existing mainstream panoramic video cameras is not high enough to give users a viewing experience close to that of a conventional display, and the network bandwidth is another bottleneck for high-resolution 360-degree video streaming. Researchers have introduced numerous approaches to resolve these issues, including viewport prediction and regional super-resolution. Nevertheless, existing methods can-not ensure that users always have high-resolution viewport content because of the inevitable prediction error and the heavy server-side pre-process consumption. In this paper, we present RA360SR, a real-time acceleration-adaptive 360-degree video super-resolution system. We develop a dual-camera system with Unity3D post-processing to implement the real-time super-resolution model processing. Additionally, to obtain a more stable frame rate, we propose an acceleration-adaptive approach to switch the super-resolution model processing status based on the acceleration of users' head movements. Our results show that RA360SR can deliver a sharper video viewing experience for users while providing an acceptable frame rate.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "With the development of virtual reality headsets, panoramic videos have started to gain increased popularity, and 360-degree video streaming technologies and devices have achieved significant success in the market. However, the video resolution provided by existing mainstream panoramic video cameras is not high enough to give users a viewing experience close to that of a conventional display, and the network bandwidth is another bottleneck for high-resolution 360-degree video streaming. Researchers have introduced numerous approaches to resolve these issues, including viewport prediction and regional super-resolution. Nevertheless, existing methods can-not ensure that users always have high-resolution viewport content because of the inevitable prediction error and the heavy server-side pre-process consumption. In this paper, we present RA360SR, a real-time acceleration-adaptive 360-degree video super-resolution system. We develop a dual-camera system with Unity3D post-processing to implement the real-time super-resolution model processing. Additionally, to obtain a more stable frame rate, we propose an acceleration-adaptive approach to switch the super-resolution model processing status based on the acceleration of users' head movements. Our results show that RA360SR can deliver a sharper video viewing experience for users while providing an acceptable frame rate.",
"fno": "536500a202",
"keywords": [
"Image Resolution",
"Industrial Robots",
"Rendering Computer Graphics",
"Telerobotics",
"Video Cameras",
"Video Signal Processing",
"Video Streaming",
"Virtual Reality",
"High Resolution 360 Degree Video",
"High Resolution Viewport Content",
"Mainstream Panoramic Video Cameras",
"Panoramic Videos",
"RA 360 SR",
"Real Time Acceleration Adaptive 360 Degree Video Super Resolution",
"Regional Super Resolution",
"Sharper Video Viewing Experience",
"Super Resolution Model Processing",
"Video Resolution",
"Headphones",
"Solid Modeling",
"Cybersickness",
"Superresolution",
"Switches",
"Bandwidth",
"Streaming Media",
"Virtual Reality",
"360 Degree Video",
"Super Resolution",
"Real Time"
],
"authors": [
{
"affiliation": "University of Central Florida",
"fullName": "Jiapeng Chi",
"givenName": "Jiapeng",
"surname": "Chi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Central Florida",
"fullName": "Dirk Reiners",
"givenName": "Dirk",
"surname": "Reiners",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Central Florida",
"fullName": "Carolina Cruz-Neira",
"givenName": "Carolina",
"surname": "Cruz-Neira",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-10-01T00:00:00",
"pubType": "proceedings",
"pages": "202-206",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-5365-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "536500a198",
"articleId": "1J7Wfnv0qZ2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "536500a207",
"articleId": "1J7WszthkjK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892319",
"title": "Adaptive 360-degree video streaming using layered video coding",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892319/12OmNyv7mbV",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2016/4571/0/4571a583",
"title": "Viewport-Adaptive Encoding and Streaming of 360-Degree Video for Virtual Reality Applications",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2016/4571a583/12OmNzsJ7Ig",
"parentPublication": {
"id": "proceedings/ism/2016/4571/0",
"title": "2016 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2018/6857/0/685700a065",
"title": "Tile-Based Rate Assignment for 360-Degree Video Based on Spatio-Temporal Activity Metrics",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2018/685700a065/17D45VVho3H",
"parentPublication": {
"id": "proceedings/ism/2018/6857/0",
"title": "2018 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/lcn/2018/4413/0/08638092",
"title": "Plato: Learning-based Adaptive Streaming of 360-Degree Videos",
"doi": null,
"abstractUrl": "/proceedings-article/lcn/2018/08638092/18rqIpj1b3i",
"parentPublication": {
"id": "proceedings/lcn/2018/4413/0",
"title": "2018 IEEE 43rd Conference on Local Computer Networks (LCN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2021/3734/0/373400a138",
"title": "L3BOU: Low Latency, Low Bandwidth, Optimized Super-Resolution Backhaul for 360-Degree Video Streaming",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2021/373400a138/1A3j9j4t2Gk",
"parentPublication": {
"id": "proceedings/ism/2021/3734/0",
"title": "2021 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iisa/2022/6390/0/09904420",
"title": "Subtitle-based Viewport Prediction for 360-degree Virtual Tourism Video",
"doi": null,
"abstractUrl": "/proceedings-article/iisa/2022/09904420/1H5KpY37ODe",
"parentPublication": {
"id": "proceedings/iisa/2022/6390/0",
"title": "2022 13th International Conference on Information, Intelligence, Systems & Applications (IISA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600b766",
"title": "Cross-Resolution Flow Propagation for Foveated Video Super-Resolution",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600b766/1KxV3bSa3Fm",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2022/6497/0/649700a531",
"title": "VRFormer: 360-Degree Video Streaming with FoV Combined Prediction and Super resolution",
"doi": null,
"abstractUrl": "/proceedings-article/ispa-bdcloud-socialcom-sustaincom/2022/649700a531/1LKwldiRY40",
"parentPublication": {
"id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2022/6497/0",
"title": "2022 IEEE Intl Conf on Parallel & Distributed Processing with Applications, Big Data & Cloud Computing, Sustainable Computing & Communications, Social Computing & Networking (ISPA/BDCloud/SocialCom/SustainCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2021/07/09024132",
"title": "EPASS360: QoE-Aware 360-Degree Video Streaming Over Mobile Devices",
"doi": null,
"abstractUrl": "/journal/tm/2021/07/09024132/1hVmzVfm7sY",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2020/8697/0/869700a085",
"title": "On Subpicture-based Viewport-dependent 360-degree Video Streaming using VVC",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2020/869700a085/1qBbHaCz3vG",
"parentPublication": {
"id": "proceedings/ism/2020/8697/0",
"title": "2020 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
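The acceleration-adaptive switching idea in the RA360SR abstract above can be illustrated with a small sketch: run the super-resolution model only while the head is moving slowly enough for the added sharpness to be noticeable, and skip it during fast movements to protect the frame rate. The thresholds, hysteresis, and function names below are illustrative assumptions, not values or APIs from the paper.

```python
# Sketch of acceleration-adaptive super-resolution (SR) switching.
# ACCEL_ON / ACCEL_OFF are hypothetical thresholds; the paper does not
# publish its values, and real head motion would come from the HMD SDK.
ACCEL_ON = 5.0    # rad/s^2: below this, the head is stable enough to run SR
ACCEL_OFF = 15.0  # rad/s^2: above this, skip SR to keep the frame rate stable

def angular_acceleration(prev_velocity, velocity, dt):
    """Finite-difference estimate of head angular acceleration (rad/s^2)."""
    return abs(velocity - prev_velocity) / dt

def super_resolution_enabled(accel, currently_on):
    """Hysteresis switch: two thresholds avoid toggling the SR model
    on and off every frame when acceleration hovers near a single cutoff."""
    if currently_on:
        return accel < ACCEL_OFF   # stay on until motion becomes fast
    return accel < ACCEL_ON        # turn back on only once motion settles
```

Per frame, the renderer would feed the current head acceleration into `super_resolution_enabled` and route the viewport texture through the SR model only when it returns True.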
{
"proceeding": {
"id": "1kwqNHC4Fy0",
"title": "2020 IEEE International Conference on Multimedia and Expo (ICME)",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1kwr43GC3i8",
"doi": "10.1109/ICME46284.2020.9102836",
"title": "MA360: Multi-Agent Deep Reinforcement Learning Based Live 360-Degree Video Streaming on Edge",
"normalizedTitle": "MA360: Multi-Agent Deep Reinforcement Learning Based Live 360-Degree Video Streaming on Edge",
"abstract": "The mobile edge caching has made video service providers deliver live 360-degree videos worldwide. However, these services still suffer from the huge network traffic on the core network due to the spherical nature and the diverse requests generated from large user populations. It is challenging to optimize the Quality of Experience (QoE) and the bandwidth consumption simultaneously under the significant number of users as well as dynamic network and playback status. In this paper, we propose a Multi-Agent deep reinforcement learning based 360-degree video streaming system, named MA360, to tackle this multi-user live 360-degree video streaming problem in the context of the edge cache network. Specifically, MA360 employs the Mean Field Actor-Critic (MFAC) algorithm to make clients collaboratively and distributively request tiles aiming at maximizing the overall QoE while minimizing the total bandwidth consumption. Experiments over real-world datasets show that MA360 can improve the QoE while significantly reducing the bandwidth consumption compared with several state-of-the-art edge-assisted 360-degree video streaming strategies.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The mobile edge caching has made video service providers deliver live 360-degree videos worldwide. However, these services still suffer from the huge network traffic on the core network due to the spherical nature and the diverse requests generated from large user populations. It is challenging to optimize the Quality of Experience (QoE) and the bandwidth consumption simultaneously under the significant number of users as well as dynamic network and playback status. In this paper, we propose a Multi-Agent deep reinforcement learning based 360-degree video streaming system, named MA360, to tackle this multi-user live 360-degree video streaming problem in the context of the edge cache network. Specifically, MA360 employs the Mean Field Actor-Critic (MFAC) algorithm to make clients collaboratively and distributively request tiles aiming at maximizing the overall QoE while minimizing the total bandwidth consumption. Experiments over real-world datasets show that MA360 can improve the QoE while significantly reducing the bandwidth consumption compared with several state-of-the-art edge-assisted 360-degree video streaming strategies.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The mobile edge caching has made video service providers deliver live 360-degree videos worldwide. However, these services still suffer from the huge network traffic on the core network due to the spherical nature and the diverse requests generated from large user populations. It is challenging to optimize the Quality of Experience (QoE) and the bandwidth consumption simultaneously under the significant number of users as well as dynamic network and playback status. In this paper, we propose a Multi-Agent deep reinforcement learning based 360-degree video streaming system, named MA360, to tackle this multi-user live 360-degree video streaming problem in the context of the edge cache network. Specifically, MA360 employs the Mean Field Actor-Critic (MFAC) algorithm to make clients collaboratively and distributively request tiles aiming at maximizing the overall QoE while minimizing the total bandwidth consumption. Experiments over real-world datasets show that MA360 can improve the QoE while significantly reducing the bandwidth consumption compared with several state-of-the-art edge-assisted 360-degree video streaming strategies.",
"fno": "09102836",
"keywords": [
"Cache Storage",
"Client Server Systems",
"Internet",
"Learning Artificial Intelligence",
"Mobile Computing",
"Quality Of Experience",
"Video Streaming",
"MFAC Algorithm",
"Mean Field Actor Critic Algorithm",
"Edge Assisted 360 Degree Video Streaming Strategies",
"Bandwidth Consumption",
"Qo E",
"Quality Of Experience",
"Core Network",
"Network Traffic",
"Live 360 Degree Video Streaming",
"MA 360",
"Edge Cache Network",
"Video Service Providers",
"Mobile Edge Caching",
"Multiagent Deep Reinforcement Learning",
"Streaming Media",
"Servers",
"Bandwidth",
"Quality Of Experience",
"Bit Rate",
"Machine Learning",
"Resource Management",
"360 Degree Video",
"Live Video Streaming",
"Adaptive Streaming",
"Multi Agent Deep Reinforcement Learning"
],
"authors": [
{
"affiliation": "Peking University,Wangxuan Institute of Computer Technology,Beijing,China",
"fullName": "Yixuan Ban",
"givenName": "Yixuan",
"surname": "Ban",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Peking University,Wangxuan Institute of Computer Technology,Beijing,China",
"fullName": "Yuanxing Zhang",
"givenName": "Yuanxing",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Peking University,Wangxuan Institute of Computer Technology,Beijing,China",
"fullName": "Haodan Zhang",
"givenName": "Haodan",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Peking University,Wangxuan Institute of Computer Technology,Beijing,China",
"fullName": "Xinggong Zhang",
"givenName": "Xinggong",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Peking University,Wangxuan Institute of Computer Technology,Beijing,China",
"fullName": "Zongming Guo",
"givenName": "Zongming",
"surname": "Guo",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-1331-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09102845",
"articleId": "1kwr5pMZkJi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09102952",
"articleId": "1kwr4qEHARO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/lcn/2018/4413/0/08638092",
"title": "Plato: Learning-based Adaptive Streaming of 360-Degree Videos",
"doi": null,
"abstractUrl": "/proceedings-article/lcn/2018/08638092/18rqIpj1b3i",
"parentPublication": {
"id": "proceedings/lcn/2018/4413/0",
"title": "2018 IEEE 43rd Conference on Local Computer Networks (LCN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859997",
"title": "TS360: A Two-Stage Deep Reinforcement Learning System for 360-Degree Video Streaming",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859997/1G9E35Tn6RW",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859789",
"title": "MFVP: Mobile-Friendly Viewport Prediction for Live 360-Degree Video Streaming",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859789/1G9EA5cTE88",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscc/2022/9792/0/09913007",
"title": "Deep Reinforcement Learning Based Adaptive 360-degree Video Streaming with Field of View Joint Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/iscc/2022/09913007/1HBK3Mimize",
"parentPublication": {
"id": "proceedings/iscc/2022/9792/0",
"title": "2022 IEEE Symposium on Computers and Communications (ISCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/5555/01/10011565",
"title": "Robust Saliency-Driven Quality Adaptation for Mobile 360-Degree Video Streaming",
"doi": null,
"abstractUrl": "/journal/tm/5555/01/10011565/1JNmCJK5FYY",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2019/9552/0/955200a290",
"title": "360SRL: A Sequential Reinforcement Learning Approach for ABR Tile-Based 360 Video Streaming",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2019/955200a290/1cdOIGM7HjO",
"parentPublication": {
"id": "proceedings/icme/2019/9552/0",
"title": "2019 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ifip-networking/2019/16/0/08999460",
"title": "Advancing user quality of experience in 360-degree video streaming",
"doi": null,
"abstractUrl": "/proceedings-article/ifip-networking/2019/08999460/1hHLyJf1thC",
"parentPublication": {
"id": "proceedings/ifip-networking/2019/16/0",
"title": "2019 IFIP Networking Conference (IFIP Networking)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2021/07/09024132",
"title": "EPASS360: QoE-Aware 360-Degree Video Streaming Over Mobile Devices",
"doi": null,
"abstractUrl": "/journal/tm/2021/07/09024132/1hVmzVfm7sY",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2022/07/09261971",
"title": "Online Bitrate Selection for Viewport Adaptive 360-Degree Video Streaming",
"doi": null,
"abstractUrl": "/journal/tm/2022/07/09261971/1oPzPzmWa9W",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icws/2021/1681/0/168100a208",
"title": "CUBIST: High-Quality 360-Degree Video Streaming Services via Tile-based Edge Caching and FoV-Adaptive Prefetching",
"doi": null,
"abstractUrl": "/proceedings-article/icws/2021/168100a208/1yrHDztOVck",
"parentPublication": {
"id": "proceedings/icws/2021/1681/0",
"title": "2021 IEEE International Conference on Web Services (ICWS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
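As a rough illustration of the trade-off MA360 optimizes, a per-client reward can combine viewport quality, quality oscillation, and bandwidth cost, while the mean-field idea replaces the joint action of all other clients with their average. The reward shape, coefficients, and helper names below are assumptions made for illustration; they are not the paper's actual formulation.

```python
def client_reward(viewport_quality, quality_change, bandwidth_used,
                  alpha=1.0, beta=0.5, gamma=0.1):
    """Illustrative per-client reward: reward viewport quality, penalize
    quality oscillation and bandwidth use. Coefficients are placeholders,
    not the paper's tuned values."""
    return (alpha * viewport_quality
            - beta * abs(quality_change)
            - gamma * bandwidth_used)

def mean_field_action(neighbor_actions):
    """Mean-field approximation: an agent conditions its policy on the
    average action (e.g., requested tile bitrate) of the other clients,
    instead of on every client's action individually."""
    if not neighbor_actions:
        return 0.0
    return sum(neighbor_actions) / len(neighbor_actions)
```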
{
"proceeding": {
"id": "1qTrKNlwI0w",
"title": "2021 International Conference on Information Networking (ICOIN)",
"acronym": "icoin",
"groupId": "1000363",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1qTrL1nfEyc",
"doi": "10.1109/ICOIN50884.2021.9333964",
"title": "Implementing Viewport Tile Extractor for Viewport-Adaptive 360-Degree Video Tiled Streaming",
"normalizedTitle": "Implementing Viewport Tile Extractor for Viewport-Adaptive 360-Degree Video Tiled Streaming",
"abstract": "Because 360-degree video streaming has become significantly popular in the field of virtual reality, the viewport-adaptive tiled streaming technology for 360-degree video is emerging. This paper presents a viewport tile extractor (VTE) that is implemented on high-efficiency video coding (HEVC). The VTE extracts multiple tiles that represent the viewport of a user and merges them into one bitstream. The proposed system transmits the bitstream of high-quality tiles and the low-quality video bitstream of entire area to reduce both latency and bandwidth. The proposed method shows more than 16.98% of bjontegaard delta rate saving in terms of the luma peak signal-to-noise ratio, compared with the HEVC-compliant streaming method. Additionally, compared with the existing tiled streaming method, it achieves 66.16% and 69.79% saving of decoding memory and time consumption, respectively.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Because 360-degree video streaming has become significantly popular in the field of virtual reality, the viewport-adaptive tiled streaming technology for 360-degree video is emerging. This paper presents a viewport tile extractor (VTE) that is implemented on high-efficiency video coding (HEVC). The VTE extracts multiple tiles that represent the viewport of a user and merges them into one bitstream. The proposed system transmits the bitstream of high-quality tiles and the low-quality video bitstream of entire area to reduce both latency and bandwidth. The proposed method shows more than 16.98% of bjontegaard delta rate saving in terms of the luma peak signal-to-noise ratio, compared with the HEVC-compliant streaming method. Additionally, compared with the existing tiled streaming method, it achieves 66.16% and 69.79% saving of decoding memory and time consumption, respectively.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Because 360-degree video streaming has become significantly popular in the field of virtual reality, the viewport-adaptive tiled streaming technology for 360-degree video is emerging. This paper presents a viewport tile extractor (VTE) that is implemented on high-efficiency video coding (HEVC). The VTE extracts multiple tiles that represent the viewport of a user and merges them into one bitstream. The proposed system transmits the bitstream of high-quality tiles and the low-quality video bitstream of entire area to reduce both latency and bandwidth. The proposed method shows more than 16.98% of bjontegaard delta rate saving in terms of the luma peak signal-to-noise ratio, compared with the HEVC-compliant streaming method. Additionally, compared with the existing tiled streaming method, it achieves 66.16% and 69.79% saving of decoding memory and time consumption, respectively.",
"fno": "09333964",
"keywords": [
"Video Coding",
"Video Streaming",
"Virtual Reality",
"Viewport Adaptive 360 Degree Video Tiled Streaming",
"VTE",
"High Efficiency Video Coding",
"Multiple Tiles",
"Low Quality Video Bitstream",
"HEVC Compliant Streaming Method",
"Viewport Tile Extractor",
"Decoding Memory",
"Signal To Noise Ratio",
"Bjontegaard Delta",
"Virtual Reality",
"PSNR",
"Memory Management",
"Virtual Reality",
"Streaming Media",
"Decoding",
"Servers",
"High Efficiency Video Coding",
"Virtual Reality",
"HEVC",
"MCTS",
"Viewport Adaptive Streaming",
"360 Degree Video"
],
"authors": [
{
"affiliation": "Sungkyunkwan University (SKKU),Department of Computer Education,Seoul,Republic of Korea",
"fullName": "Jong-Beom Jeong",
"givenName": "Jong-Beom",
"surname": "Jeong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sungkyunkwan University (SKKU),Department of Computer Education,Seoul,Republic of Korea",
"fullName": "Soonbin Lee",
"givenName": "Soonbin",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sungkyunkwan University (SKKU),Department of Computer Education,Seoul,Republic of Korea",
"fullName": "Inae Kim",
"givenName": "Inae",
"surname": "Kim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sungkyunkwan University (SKKU),Department of Computer Education,Seoul,Republic of Korea",
"fullName": "Eun-Seok Ryu",
"givenName": "Eun-Seok",
"surname": "Ryu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icoin",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-01-01T00:00:00",
"pubType": "proceedings",
"pages": "8-12",
"year": "2021",
"issn": "1976-7684",
"isbn": "978-1-7281-9101-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09333876",
"articleId": "1qTrQzQTZuw",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09333995",
"articleId": "1qTrVVmoIW4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ism/2016/4571/0/4571a399",
"title": "Tile Based HEVC Video for Head Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2016/4571a399/12OmNwF0BRC",
"parentPublication": {
"id": "proceedings/ism/2016/4571/0",
"title": "2016 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2017/2937/0/2937a038",
"title": "A New Adaptation Approach for Viewport-adaptive 360-degree Video Streaming",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2017/2937a038/12OmNwwd2MD",
"parentPublication": {
"id": "proceedings/ism/2017/2937/0",
"title": "2017 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2018/6857/0/685700a065",
"title": "Tile-Based Rate Assignment for 360-Degree Video Based on Spatio-Temporal Activity Metrics",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2018/685700a065/17D45VVho3H",
"parentPublication": {
"id": "proceedings/ism/2018/6857/0",
"title": "2018 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2018/9269/0/926900a157",
"title": "Trajectory-Based Viewport Prediction for 360-Degree Virtual Reality Videos",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2018/926900a157/17D45WZZ7Fb",
"parentPublication": {
"id": "proceedings/aivr/2018/9269/0",
"title": "2018 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/msn/2021/0668/0/066800a462",
"title": "Soft Actor-Critic Algorithm for 360-Degree Video Streaming with Long-Term Viewport Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/msn/2021/066800a462/1CxzyBvoVva",
"parentPublication": {
"id": "proceedings/msn/2021/0668/0",
"title": "2021 17th International Conference on Mobility, Sensing and Networking (MSN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859789",
"title": "MFVP: Mobile-Friendly Viewport Prediction for Live 360-Degree Video Streaming",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859789/1G9EA5cTE88",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2022/9548/0/954800a274",
"title": "Rate-Adaptive Streaming of 360-Degree Videos with Head-Motion-Aware Viewport Margins",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2022/954800a274/1GvddU2H1Pq",
"parentPublication": {
"id": "proceedings/mipr/2022/9548/0",
"title": "2022 IEEE 5th International Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iisa/2022/6390/0/09904420",
"title": "Subtitle-based Viewport Prediction for 360-degree Virtual Tourism Video",
"doi": null,
"abstractUrl": "/proceedings-article/iisa/2022/09904420/1H5KpY37ODe",
"parentPublication": {
"id": "proceedings/iisa/2022/6390/0",
"title": "2022 13th International Conference on Information, Intelligence, Systems & Applications (IISA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2019/9214/0/921400a324",
"title": "VAS360: QoE-Driven Viewport Adaptive Streaming for 360 Video",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2019/921400a324/1cJ0BSNq6FW",
"parentPublication": {
"id": "proceedings/icmew/2019/9214/0",
"title": "2019 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2020/8697/0/869700a085",
"title": "On Subpicture-based Viewport-dependent 360-degree Video Streaming using VVC",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2020/869700a085/1qBbHaCz3vG",
"parentPublication": {
"id": "proceedings/ism/2020/8697/0",
"title": "2020 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
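The extractor described above needs to know which tiles cover the user's viewport before it can pull their high-quality bitstreams and merge them over the low-quality base layer. A minimal sketch of that selection step, assuming an equirectangular frame split into a fixed tile grid and a rectangular approximation of the viewport (the grid size, sampling pattern, and function name are illustrative, not from the paper):

```python
def viewport_tiles(yaw_deg, pitch_deg, fov_deg, cols=6, rows=4):
    """Return (col, row) indices of equirectangular tiles overlapped by a
    rectangular approximation of the viewport, sampled at its center,
    edges, and corners. The 6x4 grid is an illustrative choice."""
    tile_w, tile_h = 360.0 / cols, 180.0 / rows
    half = fov_deg / 2.0
    tiles = set()
    for d_yaw in (-half, 0.0, half):
        for d_pitch in (-half, 0.0, half):
            lon = (yaw_deg + d_yaw) % 360.0                    # wrap horizontally
            lat = max(-90.0, min(90.0, pitch_deg + d_pitch))   # clamp at the poles
            col = int(lon // tile_w)
            row = min(rows - 1, int((lat + 90.0) // tile_h))
            tiles.add((col, row))
    return tiles
```

The server would then extract the HEVC tile bitstreams for these indices at high quality and merge them with the whole-frame low-quality stream, as the abstract describes.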
{
"proceeding": {
"id": "1yrHuRJhuog",
"title": "2021 IEEE International Conference on Web Services (ICWS)",
"acronym": "icws",
"groupId": "1001210",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yrHDztOVck",
"doi": "10.1109/ICWS53863.2021.00039",
"title": "CUBIST: High-Quality 360-Degree Video Streaming Services via Tile-based Edge Caching and FoV-Adaptive Prefetching",
"normalizedTitle": "CUBIST: High-Quality 360-Degree Video Streaming Services via Tile-based Edge Caching and FoV-Adaptive Prefetching",
"abstract": "360-degree video streaming, which is becoming more and more popular as the fast development of VR/AR applications nowadays due to the immersive viewing experience it can offer, poses enormous challenges to the current network infrastructure in terms of high bandwidth and low latency requirements. To address this problem and to ensure the QoE (quality of experience) of end-users, this paper presents CUBIST, a method and system for high-quality 360-degree video streaming in networks with cache nodes at the edge. To the best of our knowledge, it is the first tile-based edge caching solution that incorporates proactive tile prefetching and hierarchical cache organization into reactive caching to maximize the caching benefit while reducing the cost of 360-degree video streaming. Experimental results show that CUBIST can achieve a cache hit ratio of 87 % and improve the effective video bitrate by 12.9 % with most rate transitions being small when compared with the latest FoV-aware edge caching scheme.",
"abstracts": [
{
"abstractType": "Regular",
"content": "360-degree video streaming, which is becoming more and more popular as the fast development of VR/AR applications nowadays due to the immersive viewing experience it can offer, poses enormous challenges to the current network infrastructure in terms of high bandwidth and low latency requirements. To address this problem and to ensure the QoE (quality of experience) of end-users, this paper presents CUBIST, a method and system for high-quality 360-degree video streaming in networks with cache nodes at the edge. To the best of our knowledge, it is the first tile-based edge caching solution that incorporates proactive tile prefetching and hierarchical cache organization into reactive caching to maximize the caching benefit while reducing the cost of 360-degree video streaming. Experimental results show that CUBIST can achieve a cache hit ratio of 87 % and improve the effective video bitrate by 12.9 % with most rate transitions being small when compared with the latest FoV-aware edge caching scheme.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "360-degree video streaming, which is becoming more and more popular as the fast development of VR/AR applications nowadays due to the immersive viewing experience it can offer, poses enormous challenges to the current network infrastructure in terms of high bandwidth and low latency requirements. To address this problem and to ensure the QoE (quality of experience) of end-users, this paper presents CUBIST, a method and system for high-quality 360-degree video streaming in networks with cache nodes at the edge. To the best of our knowledge, it is the first tile-based edge caching solution that incorporates proactive tile prefetching and hierarchical cache organization into reactive caching to maximize the caching benefit while reducing the cost of 360-degree video streaming. Experimental results show that CUBIST can achieve a cache hit ratio of 87 % and improve the effective video bitrate by 12.9 % with most rate transitions being small when compared with the latest FoV-aware edge caching scheme.",
"fno": "168100a208",
"keywords": [
"Quality Of Experience",
"Video Streaming",
"Quality Of Experience",
"CUBIST",
"Cache Nodes",
"Tile Based Edge Caching Solution",
"Proactive Tile Prefetching",
"High Quality 360 Degree Video Streaming Services",
"Fo V Adaptive Prefetching",
"Immersive Viewing Experience",
"Hierarchical Reactive Cache Organization",
"Fo V Aware Edge Caching Scheme",
"VR AR Application",
"Qo E",
"Costs",
"Web Services",
"Prefetching",
"Bit Rate",
"Bandwidth",
"Transcoding",
"Streaming Media",
"360 Degree Video",
"Edge Caching",
"Fo V Adaptive Prefetching",
"Qo E"
],
"authors": [
{
"affiliation": "Beijing National Research Center for Information Science and Technology, Tsinghua University,Department of Computer Science and Technology,Beijing,China,100084",
"fullName": "Dongbiao He",
"givenName": "Dongbiao",
"surname": "He",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing National Research Center for Information Science and Technology, Tsinghua University,Department of Computer Science and Technology,Beijing,China,100084",
"fullName": "Jinlei Jiang",
"givenName": "Jinlei",
"surname": "Jiang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing National Research Center for Information Science and Technology, Tsinghua University,Department of Computer Science and Technology,Beijing,China,100084",
"fullName": "Teng Ma",
"givenName": "Teng",
"surname": "Ma",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing National Research Center for Information Science and Technology, Tsinghua University,Department of Computer Science and Technology,Beijing,China,100084",
"fullName": "Guangwen Yang",
"givenName": "Guangwen",
"surname": "Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Futurewei Technologies, Inc.,Santa Clara,CA,USA,95050",
"fullName": "Cedric Westphal",
"givenName": "Cedric",
"surname": "Westphal",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of California Santa Cruz,Computer Science and Engineering Department,Santa Cruz,CA,USA,95064",
"fullName": "JJ Garcia-Luna-Aceves",
"givenName": "JJ",
"surname": "Garcia-Luna-Aceves",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tsinghua Shenzhen International Graduate School, Tsinghua University,Shenzhen,China,518055",
"fullName": "Shu-Tao Xia",
"givenName": "Shu-Tao",
"surname": "Xia",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icws",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-09-01T00:00:00",
"pubType": "proceedings",
"pages": "208-218",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-1681-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "168100a198",
"articleId": "1yrHHe3H6ve",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "168100a219",
"articleId": "1yrHAdTmRr2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmew/2018/4195/0/08551493",
"title": "Tile-Based Qoe-Driven Http/2 Streaming System For 360 Video",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2018/08551493/17D45VTRooi",
"parentPublication": {
"id": "proceedings/icmew/2018/4195/0",
"title": "2018 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2018/6857/0/685700a065",
"title": "Tile-Based Rate Assignment for 360-Degree Video Based on Spatio-Temporal Activity Metrics",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2018/685700a065/17D45VVho3H",
"parentPublication": {
"id": "proceedings/ism/2018/6857/0",
"title": "2018 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/msn/2021/0668/0/066800a462",
"title": "Soft Actor-Critic Algorithm for 360-Degree Video Streaming with Long-Term Viewport Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/msn/2021/066800a462/1CxzyBvoVva",
"parentPublication": {
"id": "proceedings/msn/2021/0668/0",
"title": "2021 17th International Conference on Mobility, Sensing and Networking (MSN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/5555/01/09842378",
"title": "Muster: Multi-source Streaming for Tile-based 360° Videos within Cloud Native 5G Networks",
"doi": null,
"abstractUrl": "/journal/tm/5555/01/09842378/1FlM107xCMw",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscc/2022/9792/0/09913007",
"title": "Deep Reinforcement Learning Based Adaptive 360-degree Video Streaming with Field of View Joint Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/iscc/2022/09913007/1HBK3Mimize",
"parentPublication": {
"id": "proceedings/iscc/2022/9792/0",
"title": "2022 IEEE Symposium on Computers and Communications (ISCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2019/9552/0/955200a290",
"title": "360SRL: A Sequential Reinforcement Learning Approach for ABR Tile-Based 360 Video Streaming",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2019/955200a290/1cdOIGM7HjO",
"parentPublication": {
"id": "proceedings/icme/2019/9552/0",
"title": "2019 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cloud/2020/8780/0/878000a337",
"title": "Allies: Tile-Based Joint Transcoding, Delivery and Caching of 360° Videos in Edge Cloud Networks",
"doi": null,
"abstractUrl": "/proceedings-article/cloud/2020/878000a337/1pF6lo64jOo",
"parentPublication": {
"id": "proceedings/cloud/2020/8780/0",
"title": "2020 IEEE 13th International Conference on Cloud Computing (CLOUD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2020/8697/0/869700a077",
"title": "CooPEC: Cooperative Prefetching and Edge Caching for Adaptive 360° Video Streaming",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2020/869700a077/1qBbI2Tm5os",
"parentPublication": {
"id": "proceedings/ism/2020/8697/0",
"title": "2020 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2022/10/09351629",
"title": "A Hybrid Control Scheme for 360-Degree Dynamic Adaptive Video Streaming Over Mobile Devices",
"doi": null,
"abstractUrl": "/journal/tm/2022/10/09351629/1r50nXXcRuU",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/msn/2020/9916/0/991600a441",
"title": "Tile-based Multi-source Adaptive Streaming for 360-degree Ultra-High-Definition Videos",
"doi": null,
"abstractUrl": "/proceedings-article/msn/2020/991600a441/1sBO8MZ4kx2",
"parentPublication": {
"id": "proceedings/msn/2020/9916/0",
"title": "2020 16th International Conference on Mobility, Sensing and Networking (MSN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
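CUBIST's combination of reactive caching with proactive, FoV-driven prefetching can be sketched with a toy LRU tile cache: a miss is filled from the origin as usual, and tiles predicted to enter the field of view are fetched alongside it. This deliberately ignores CUBIST's hierarchical cache organization and cost model; the class and method names are invented for illustration.

```python
from collections import OrderedDict

class TileEdgeCache:
    """Toy LRU tile cache with FoV-driven prefetch; a sketch of the general
    idea only, not CUBIST's hierarchical design."""

    def __init__(self, capacity):
        self.capacity = capacity
        self.store = OrderedDict()  # (segment, tile, quality) -> tile bytes

    def get(self, key, fetch, predicted_keys=()):
        if key in self.store:
            self.store.move_to_end(key)        # cache hit: refresh recency
            return self.store[key]
        data = fetch(key)                      # reactive fill from the origin
        self._put(key, data)
        for pk in predicted_keys:              # proactive FoV-adaptive prefetch
            if pk not in self.store:
                self._put(pk, fetch(pk))
        return data

    def _put(self, key, data):
        self.store[key] = data
        if len(self.store) > self.capacity:
            self.store.popitem(last=False)     # evict the least-recently-used tile
```

The caller supplies `predicted_keys` from its viewport predictor, so prefetch accuracy (and hence hit ratio) depends entirely on the FoV prediction feeding the cache.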
{
"proceeding": {
"id": "12OmNwkhTj9",
"title": "2009 First International Workshop on Database Technology and Applications, DBTA",
"acronym": "dbta",
"groupId": "1002840",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBpmDFG",
"doi": "10.1109/DBTA.2009.152",
"title": "Effect of Navigation Aids and Landmarks on Acquisition of Spatial Knowledge in Virtual Environments",
"normalizedTitle": "Effect of Navigation Aids and Landmarks on Acquisition of Spatial Knowledge in Virtual Environments",
"abstract": "The effect of navigation aids and landmarkson spatial learning was investigated when participants repeatedly navigated four complex three-dimensional virtual mazes. The study was divided into two main phases: learning and a test of learning transfer. The learning phase consisted of participants directly navigating in one of the four virtual mazes with or without navigation aids and landmarks. Learning transfer was examined by testing navigation tasks in the environment without the use of the navigation aids. Findings show that the combined impact of both navigation aids and landmarks on spatial knowledge acquisition in navigation tasks.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The effect of navigation aids and landmarkson spatial learning was investigated when participants repeatedly navigated four complex three-dimensional virtual mazes. The study was divided into two main phases: learning and a test of learning transfer. The learning phase consisted of participants directly navigating in one of the four virtual mazes with or without navigation aids and landmarks. Learning transfer was examined by testing navigation tasks in the environment without the use of the navigation aids. Findings show that the combined impact of both navigation aids and landmarks on spatial knowledge acquisition in navigation tasks.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The effect of navigation aids and landmarkson spatial learning was investigated when participants repeatedly navigated four complex three-dimensional virtual mazes. The study was divided into two main phases: learning and a test of learning transfer. The learning phase consisted of participants directly navigating in one of the four virtual mazes with or without navigation aids and landmarks. Learning transfer was examined by testing navigation tasks in the environment without the use of the navigation aids. Findings show that the combined impact of both navigation aids and landmarks on spatial knowledge acquisition in navigation tasks.",
"fno": "3604a030",
"keywords": [
"Spatial Knowledge",
"Virtual Mazes",
"Navigation"
],
"authors": [
{
"affiliation": null,
"fullName": "Zuo Wuheng",
"givenName": "Zuo",
"surname": "Wuheng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xu Baihua",
"givenName": "Xu",
"surname": "Baihua",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yuan He",
"givenName": "Yuan",
"surname": "He",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Feng Zhilin",
"givenName": "Feng",
"surname": "Zhilin",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "dbta",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-04-01T00:00:00",
"pubType": "proceedings",
"pages": "30-32",
"year": "2009",
"issn": null,
"isbn": "978-0-7695-3604-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3604a025",
"articleId": "12OmNqG0SJc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3604a033",
"articleId": "12OmNCvcLJJ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2004/8415/0/84150173",
"title": "Navigation with Place Representations and Visible Landmarks",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2004/84150173/12OmNBTJIzP",
"parentPublication": {
"id": "proceedings/vr/2004/8415/0",
"title": "Virtual Reality Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2005/8929/0/01492761",
"title": "The effect of trails on first-time and subsequent navigation in a virtual environment",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2005/01492761/12OmNBfqG2v",
"parentPublication": {
"id": "proceedings/vr/2005/8929/0",
"title": "IEEE Virtual Reality 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2010/3869/0/01-10-06",
"title": "Limitations of Signs as Navigation Aids in Virtual Worlds",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2010/01-10-06/12OmNqGRGn6",
"parentPublication": {
"id": "proceedings/hicss/2010/3869/0",
"title": "2010 43rd Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2002/1695/2/169520848",
"title": "A Smart Sensor Based Visual Landmarks Detection for Indoor Robot Navigation",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2002/169520848/12OmNqIQS5E",
"parentPublication": {
"id": "proceedings/icpr/2002/1695/2",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2008/3381/0/04741362",
"title": "Research on Navigation-Aids Information System",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2008/04741362/12OmNwDSdzI",
"parentPublication": {
"id": "proceedings/cw/2008/3381/0",
"title": "2008 International Conference on Cyberworlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1993/3880/0/00341058",
"title": "Using isolated landmarks and trajectories in robot navigation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1993/00341058/12OmNxecRR2",
"parentPublication": {
"id": "proceedings/cvpr/1993/3880/0",
"title": "Proceedings of IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cinc/2009/3645/2/3645b534",
"title": "Effect of Interaction Characteristics of Virtual Environments on Spatial Representation",
"doi": null,
"abstractUrl": "/proceedings-article/cinc/2009/3645b534/12OmNzWx03O",
"parentPublication": {
"id": "cinc/2009/3645/2",
"title": "Computational Intelligence and Natural Computing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a930",
"title": "[DC] Leveraging AR Cues towards New Navigation Assistant Paradigm",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a930/1CJcTykeypq",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600p5407",
"title": "Less is More: Generating Grounded Navigation Instructions from Landmarks",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600p5407/1H1hxJiUhOw",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090647",
"title": "The Effect of Navigational Aids on Spatial Memory in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090647/1jIxAjW4aWI",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNqGA5i5",
"title": "2012 Second International Conference on Intelligent System Design and Engineering Application",
"acronym": "isdea",
"groupId": "1800333",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNrYlmR8",
"doi": "10.1109/ISdea.2012.730",
"title": "Artificial Landmark Positioning System Using Omnidirectional Vision for Agricultural Vehicle Navigation",
"normalizedTitle": "Artificial Landmark Positioning System Using Omnidirectional Vision for Agricultural Vehicle Navigation",
"abstract": "The positioning of agricultural vehicle for autonomous navigation systems is important. We proposed a novel simple and inexpensive positioning system utilized by four artificial landmarks and an omni directional vision sensor. The red intensity pixels not less than threshold were extracted as a small area in the omni directional image and the center of the gravity was calculated as one landmark position. The direction angles between the neighboring landmarks were calculated after detecting all landmarks. Location of the vehicle was estimated by the center of gravity of four intersections formed by four arcs according to geometric transformation based on four direction angles of four landmarks with camera. In the field experiment, the sensor was fixed on the ground 50 m × 50 m square area in the sunshine to get clear images and examined the accuracy of the position estimation algorithm. Another experiment was done to test practical application with vehicle in a straight-line driving. The field experimental results showed that the mean absolute error of the four direction angles was about 1 to 2 degree and the RMS error in distance was about 24.51 cm. The vehicle practical experimental results showed that the real-time distance errors were mostly between about 3 cm to 10 cm. The RMS error was about 8.34 cm. This positioning system is a simple, feasible and prospective system for agricultural vehicle navigation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The positioning of agricultural vehicle for autonomous navigation systems is important. We proposed a novel simple and inexpensive positioning system utilized by four artificial landmarks and an omni directional vision sensor. The red intensity pixels not less than threshold were extracted as a small area in the omni directional image and the center of the gravity was calculated as one landmark position. The direction angles between the neighboring landmarks were calculated after detecting all landmarks. Location of the vehicle was estimated by the center of gravity of four intersections formed by four arcs according to geometric transformation based on four direction angles of four landmarks with camera. In the field experiment, the sensor was fixed on the ground 50 m × 50 m square area in the sunshine to get clear images and examined the accuracy of the position estimation algorithm. Another experiment was done to test practical application with vehicle in a straight-line driving. The field experimental results showed that the mean absolute error of the four direction angles was about 1 to 2 degree and the RMS error in distance was about 24.51 cm. The vehicle practical experimental results showed that the real-time distance errors were mostly between about 3 cm to 10 cm. The RMS error was about 8.34 cm. This positioning system is a simple, feasible and prospective system for agricultural vehicle navigation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The positioning of agricultural vehicle for autonomous navigation systems is important. We proposed a novel simple and inexpensive positioning system utilized by four artificial landmarks and an omni directional vision sensor. The red intensity pixels not less than threshold were extracted as a small area in the omni directional image and the center of the gravity was calculated as one landmark position. The direction angles between the neighboring landmarks were calculated after detecting all landmarks. Location of the vehicle was estimated by the center of gravity of four intersections formed by four arcs according to geometric transformation based on four direction angles of four landmarks with camera. In the field experiment, the sensor was fixed on the ground 50 m × 50 m square area in the sunshine to get clear images and examined the accuracy of the position estimation algorithm. Another experiment was done to test practical application with vehicle in a straight-line driving. The field experimental results showed that the mean absolute error of the four direction angles was about 1 to 2 degree and the RMS error in distance was about 24.51 cm. The vehicle practical experimental results showed that the real-time distance errors were mostly between about 3 cm to 10 cm. The RMS error was about 8.34 cm. This positioning system is a simple, feasible and prospective system for agricultural vehicle navigation.",
"fno": "4608a665",
"keywords": [
"Agricultural Vehicle",
"Navigation",
"Artificial Landmark",
"Omnidirectional Vision Sensor"
],
"authors": [
{
"affiliation": null,
"fullName": "Ming Li",
"givenName": "Ming",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Zh. H. Liu",
"givenName": "Zh. H.",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "J.A. Huang",
"givenName": "J.A.",
"surname": "Huang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "S.H. Dai",
"givenName": "S.H.",
"surname": "Dai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "K. Wakabayashi",
"givenName": "K.",
"surname": "Wakabayashi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "K. Imou",
"givenName": "K.",
"surname": "Imou",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "isdea",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-01-01T00:00:00",
"pubType": "proceedings",
"pages": "665-669",
"year": "2012",
"issn": null,
"isbn": "978-0-7695-4608-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4608a661",
"articleId": "12OmNvq5jGv",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4608a670",
"articleId": "12OmNx5GUaC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/uksim/2012/4682/0/4682a265",
"title": "A Quantitative Evaluation Method of Landmark Effectiveness for Pedestrian Navigation",
"doi": null,
"abstractUrl": "/proceedings-article/uksim/2012/4682a265/12OmNAqkSFH",
"parentPublication": {
"id": "proceedings/uksim/2012/4682/0",
"title": "Computer Modeling and Simulation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dimpvt/2012/4873/0/4873a278",
"title": "Visual Landmark-Based Localization for MAVs Using Incremental Feature Updates",
"doi": null,
"abstractUrl": "/proceedings-article/3dimpvt/2012/4873a278/12OmNArbG56",
"parentPublication": {
"id": "proceedings/3dimpvt/2012/4873/0",
"title": "2012 Second International Conference on 3D Imaging, Modeling, Processing, Visualization & Transmission",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eurobot/1996/7695/0/76950068",
"title": "Landmark-based autonomous navigation in sewerage pipes",
"doi": null,
"abstractUrl": "/proceedings-article/eurobot/1996/76950068/12OmNBbsiep",
"parentPublication": {
"id": "proceedings/eurobot/1996/7695/0",
"title": "Advanced Mobile Robots, Euromicro Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2006/2503/0/25030073",
"title": "A Landmark Paper in Face Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2006/25030073/12OmNrJiD0D",
"parentPublication": {
"id": "proceedings/fg/2006/2503/0",
"title": "7th International Conference on Automatic Face and Gesture Recognition (FGR06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnc/2008/3304/6/3304f183",
"title": "Omnidirectional Vision Tracking and Positioning for Vehicles",
"doi": null,
"abstractUrl": "/proceedings-article/icnc/2008/3304f183/12OmNs59JI5",
"parentPublication": {
"id": "proceedings/icnc/2008/3304/6",
"title": "2008 Fourth International Conference on Natural Computation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isdea/2012/4608/0/4608a700",
"title": "Landmark-Based Localization for Indoor Mobile Robots with Stereo Vision",
"doi": null,
"abstractUrl": "/proceedings-article/isdea/2012/4608a700/12OmNyo1o02",
"parentPublication": {
"id": "proceedings/isdea/2012/4608/0",
"title": "2012 Second International Conference on Intelligent System Design and Engineering Application",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iros/1995/7108/2/71082150",
"title": "Navigation system based on ceiling landmark recognition for autonomous mobile robot-landmark detection based on fuzzy template matching (FTM)",
"doi": null,
"abstractUrl": "/proceedings-article/iros/1995/71082150/12OmNyugz3B",
"parentPublication": {
"id": "proceedings/iros/1995/7108/2",
"title": "Intelligent Robots and Systems, IEEE/RSJ International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnc/2009/3736/5/3736e269",
"title": "A Unique Multi-functional Landmark for Autonomous Navigation",
"doi": null,
"abstractUrl": "/proceedings-article/icnc/2009/3736e269/12OmNzJbQVO",
"parentPublication": {
"id": "proceedings/icnc/2009/3736/5",
"title": "2009 Fifth International Conference on Natural Computation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2010/4077/1/4077a007",
"title": "3D Positioning for Mobile Robot Using Omnidirectional Vision",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2010/4077a007/12OmNzRqdG3",
"parentPublication": {
"id": "proceedings/icicta/2010/4077/1",
"title": "Intelligent Computation Technology and Automation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2005/01/i0078",
"title": "Iconic Memory-Based Omnidirectional Route Panorama Navigation",
"doi": null,
"abstractUrl": "/journal/tp/2005/01/i0078/13rRUxZ0o2x",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
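Every record in this dump shares one shape: a `proceeding` block, an `article` block carrying the abstract in triplicate (`abstract`, `abstracts[].content`, `normalizedAbstract`) along with `keywords`, `authors`, and pagination metadata, then `webExtras`, `adjacentArticles`, a `recommendedArticles` list, and `articleVideos`. As a minimal sketch (not an official CSDL client), the commonly used fields can be flattened out of one record with Python's standard `json` module; `record_json` and `summarize_record` are illustrative names, standing in for any one `{...}` cell above:

```python
import json

def summarize_record(record_json: str) -> dict:
    """Extract the commonly used fields from one CSDL-style record."""
    rec = json.loads(record_json)
    art = rec["article"]
    return {
        "title": art["title"],
        "doi": art["doi"],
        "year": art["year"],
        "pages": art["pages"],
        "venue": rec["proceeding"]["title"],
        "authors": [a["fullName"] for a in art["authors"]],
        "keywords": art["keywords"],
        # Recommended entries carry no DOI in this dump, only an abstractUrl path.
        "recommended": [
            (r["title"], r["abstractUrl"]) for r in rec["recommendedArticles"]
        ],
    }
```

Note that `doi` is null on every `recommendedArticles` entry in this dump, so the `abstractUrl` path is the only stable link for those entries.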
{
"proceeding": {
"id": "12OmNBDyAb4",
"title": "2016 XVIII Symposium on Virtual and Augmented Reality (SVR)",
"acronym": "svr",
"groupId": "1800426",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwlqhJO",
"doi": "10.1109/SVR.2016.25",
"title": "A Study in Virtual Navigation Cues for Forklift Operators",
"normalizedTitle": "A Study in Virtual Navigation Cues for Forklift Operators",
"abstract": "Augmented Reality (AR) is a technology that can overlap virtual elements over the real world in real time. This research focuses on studying how different AR elements can help forklift operators locate pallets as quickly as possible in a warehouse environment. We have developed a simulated AR environment to test Egocentric or Exocentric virtual navigation cues. The virtual elements were displayed to the user in a HUD (head-up display) on the forklift windshield, fixed place in front of the user operator, or in a HMD (head-mounted display), where the virtual cues are attached to the head of the user. A user study found that the Egocentric AR view was preferred over the Exocentric condition and performed better while the HUD and HMD viewing methods produced no difference in performance.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Augmented Reality (AR) is a technology that can overlap virtual elements over the real world in real time. This research focuses on studying how different AR elements can help forklift operators locate pallets as quickly as possible in a warehouse environment. We have developed a simulated AR environment to test Egocentric or Exocentric virtual navigation cues. The virtual elements were displayed to the user in a HUD (head-up display) on the forklift windshield, fixed place in front of the user operator, or in a HMD (head-mounted display), where the virtual cues are attached to the head of the user. A user study found that the Egocentric AR view was preferred over the Exocentric condition and performed better while the HUD and HMD viewing methods produced no difference in performance.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Augmented Reality (AR) is a technology that can overlap virtual elements over the real world in real time. This research focuses on studying how different AR elements can help forklift operators locate pallets as quickly as possible in a warehouse environment. We have developed a simulated AR environment to test Egocentric or Exocentric virtual navigation cues. The virtual elements were displayed to the user in a HUD (head-up display) on the forklift windshield, fixed place in front of the user operator, or in a HMD (head-mounted display), where the virtual cues are attached to the head of the user. A user study found that the Egocentric AR view was preferred over the Exocentric condition and performed better while the HUD and HMD viewing methods produced no difference in performance.",
"fno": "4149a095",
"keywords": [
"Navigation",
"Vehicles",
"Solid Modeling",
"Three Dimensional Displays",
"Augmented Reality",
"Automotive Components",
"Australia",
"Navigation",
"Augmented Reality",
"Logistics",
"Forklift"
],
"authors": [
{
"affiliation": null,
"fullName": "Alexandre Pereira",
"givenName": "Alexandre",
"surname": "Pereira",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Gun A. Lee",
"givenName": "Gun A.",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Edson Almeida",
"givenName": "Edson",
"surname": "Almeida",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Mark Billinghurst",
"givenName": "Mark",
"surname": "Billinghurst",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "svr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-06-01T00:00:00",
"pubType": "proceedings",
"pages": "95-99",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-4149-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4149a081",
"articleId": "12OmNxjjEdm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4149a100",
"articleId": "12OmNy4IF9u",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2013/2869/0/06671779",
"title": "A camera-based calibration for automotive augmented reality Head-Up-Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671779/12OmNBrlPB1",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504725",
"title": "Casting shadows: Ecological interface design for augmented reality pedestrian collision warning",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504725/12OmNC8uRtR",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836460",
"title": "An Augmented Reality Guide for Assisting Forklift Operation",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836460/12OmNvwTGFS",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836523",
"title": "Human Attention and fatigue for AR Head-Up Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836523/12OmNwFidbp",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223407",
"title": "Optical see-through HUDs effect on depth judgments of real world objects",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223407/12OmNyRg4pk",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223465",
"title": "Optical see-through head up displays' effect on depth judgments of real world objects",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223465/12OmNybfr2x",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836468",
"title": "Visualisation of the Electronic Horizon in Head-Up-Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836468/12OmNzDehaq",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446213",
"title": "A Calibration Method for On-Vehicle AR-HUD System Using Mixed Reality Glasses",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446213/13bd1eNNYnr",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/11/08466859",
"title": "Augmented Reality Interface Design Approaches for Goal-directed and Stimulus-driven Driving Tasks",
"doi": null,
"abstractUrl": "/journal/tg/2018/11/08466859/14M3E5b55mM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a930",
"title": "[DC] Leveraging AR Cues towards New Navigation Assistant Paradigm",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a930/1CJcTykeypq",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNBp52yg",
"title": "Intelligent Robots and Systems, IEEE/RSJ International Conference on",
"acronym": "iros",
"groupId": "1000393",
"volume": "2",
"displayVolume": "2",
"year": "1995",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyugz3B",
"doi": "10.1109/IROS.1995.526153",
"title": "Navigation system based on ceiling landmark recognition for autonomous mobile robot-landmark detection based on fuzzy template matching (FTM)",
"normalizedTitle": "Navigation system based on ceiling landmark recognition for autonomous mobile robot-landmark detection based on fuzzy template matching (FTM)",
"abstract": "We propose a vision based navigation system for autonomous mobile robots that recognizes outlets of air conditioning system (anemo) located on the ceiling as landmarks. Because landmarks on the ceiling are not obstructed by other objects, robots can find them easier than landmarks on the floor. To detect the landmark, we applied the fuzzy template matching (FTM) method. Based on FTM, a fuzzy template (FT) is modeled based on the edge information of landmarks in the image. In addition, we use neural network (NN) to detect landmarks quickly. Inputs of NN are the grades of fuzzy memberships and the learned NN directs FT to the landmark in the image. Because FTM does not use image processing, except edge extraction, it does not make processing error and the landmark can be detected precisely. The navigation system can calculate the distance and the angle of the anemo from the map information. As a result, the navigation system can identify the present robot's position and orientation. By the proposed algorithm, precise and fast landmark detection become possible even if the landmark follows the shadow.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a vision based navigation system for autonomous mobile robots that recognizes outlets of air conditioning system (anemo) located on the ceiling as landmarks. Because landmarks on the ceiling are not obstructed by other objects, robots can find them easier than landmarks on the floor. To detect the landmark, we applied the fuzzy template matching (FTM) method. Based on FTM, a fuzzy template (FT) is modeled based on the edge information of landmarks in the image. In addition, we use neural network (NN) to detect landmarks quickly. Inputs of NN are the grades of fuzzy memberships and the learned NN directs FT to the landmark in the image. Because FTM does not use image processing, except edge extraction, it does not make processing error and the landmark can be detected precisely. The navigation system can calculate the distance and the angle of the anemo from the map information. As a result, the navigation system can identify the present robot's position and orientation. By the proposed algorithm, precise and fast landmark detection become possible even if the landmark follows the shadow.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a vision based navigation system for autonomous mobile robots that recognizes outlets of air conditioning system (anemo) located on the ceiling as landmarks. Because landmarks on the ceiling are not obstructed by other objects, robots can find them easier than landmarks on the floor. To detect the landmark, we applied the fuzzy template matching (FTM) method. Based on FTM, a fuzzy template (FT) is modeled based on the edge information of landmarks in the image. In addition, we use neural network (NN) to detect landmarks quickly. Inputs of NN are the grades of fuzzy memberships and the learned NN directs FT to the landmark in the image. Because FTM does not use image processing, except edge extraction, it does not make processing error and the landmark can be detected precisely. The navigation system can calculate the distance and the angle of the anemo from the map information. As a result, the navigation system can identify the present robot's position and orientation. By the proposed algorithm, precise and fast landmark detection become possible even if the landmark follows the shadow.",
"fno": "71082150",
"keywords": [
"Mobile Robots Path Planning Fuzzy Control Neural Nets Navigation Edge Detection Robot Vision Image Matching Position Control Ceiling Landmark Recognition Autonomous Mobile Robot Navigation Fuzzy Template Matching Air Conditioning System Outlets Edge Detection Neural Network Fuzzy Memberships Orientation"
],
"authors": [
{
"affiliation": "Dept. of Mech.-Inf. & Syst., Nagoya Univ., Japan",
"fullName": "T. Fukuda",
"givenName": "T.",
"surname": "Fukuda",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Mech.-Inf. & Syst., Nagoya Univ., Japan",
"fullName": "S. Ito",
"givenName": "S.",
"surname": "Ito",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Mech.-Inf. & Syst., Nagoya Univ., Japan",
"fullName": "F. Arai",
"givenName": "F.",
"surname": "Arai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Mech.-Inf. & Syst., Nagoya Univ., Japan",
"fullName": "Y. Yokoyama",
"givenName": "Y.",
"surname": "Yokoyama",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Mech.-Inf. & Syst., Nagoya Univ., Japan",
"fullName": "Y. Abe",
"givenName": "Y.",
"surname": "Abe",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Mech.-Inf. & Syst., Nagoya Univ., Japan",
"fullName": "K. Tanaka",
"givenName": "K.",
"surname": "Tanaka",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Mech.-Inf. & Syst., Nagoya Univ., Japan",
"fullName": "Y. Tanaka",
"givenName": "Y.",
"surname": "Tanaka",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iros",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1995-08-01T00:00:00",
"pubType": "proceedings",
"pages": "2150",
"year": "1995",
"issn": null,
"isbn": "0-8186-7108-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "71082144",
"articleId": "12OmNxveNGC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "71082156",
"articleId": "12OmNz3bdLK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
} |
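The FTM scheme is only sketched at a high level in the abstract above. As a rough illustration (not the authors' exact formulation), fuzzy template matching can be approximated by scoring an edge map against a template whose pixels hold fuzzy membership grades rather than hard 0/1 values, then searching for the best-scoring offset. A minimal numpy sketch under that assumption, with a blurred binary template standing in for the membership function; `fuzzy_match_score` and `locate_landmark` are illustrative names:

```python
import numpy as np

def fuzzy_match_score(edge_patch: np.ndarray, fuzzy_template: np.ndarray) -> float:
    """Fuzzy-intersection score: mean of min(membership, edge) over the patch.

    edge_patch and fuzzy_template are same-shaped arrays with values in [0, 1].
    """
    return float(np.minimum(edge_patch, fuzzy_template).mean())

def locate_landmark(edge_map: np.ndarray, fuzzy_template: np.ndarray):
    """Exhaustive search for the offset maximizing the fuzzy match score.

    (The paper instead trains a neural network on the membership grades to
    steer the template toward the landmark, avoiding a full scan.)
    """
    th, tw = fuzzy_template.shape
    H, W = edge_map.shape
    best, best_pos = -1.0, (0, 0)
    for y in range(H - th + 1):
        for x in range(W - tw + 1):
            s = fuzzy_match_score(edge_map[y:y + th, x:x + tw], fuzzy_template)
            if s > best:
                best, best_pos = s, (y, x)
    return best_pos, best
```

The fuzzy min/mean score tolerates small edge displacements that would zero out a hard binary correlation, which matches the abstract's claim of robustness when the landmark is in shadow.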
{
"proceeding": {
"id": "12OmNAfgwyT",
"title": "2009 Fifth International Conference on Natural Computation",
"acronym": "icnc",
"groupId": "1001312",
"volume": "5",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzJbQVO",
"doi": "10.1109/ICNC.2009.525",
"title": "A Unique Multi-functional Landmark for Autonomous Navigation",
"normalizedTitle": "A Unique Multi-functional Landmark for Autonomous Navigation",
"abstract": "To save time, improve flexibility and guarantee accuracy of the navigation system of the mobile robots, a multi-function landmark and a new navigation algorithm are proposed in this paper. With the new algorithm and landmark the mobile robots can be located by only a single landmark, and can be navigated in a large area by serial landmarks. The simple configuration and the convenient material of the landmark increased the system's flexibility and decreased the complexity of the algorithm. From the experimental results it can be concluded that the navigation system has fast navigation speed and high navigation accuracy.",
"abstracts": [
{
"abstractType": "Regular",
"content": "To save time, improve flexibility and guarantee accuracy of the navigation system of the mobile robots, a multi-function landmark and a new navigation algorithm are proposed in this paper. With the new algorithm and landmark the mobile robots can be located by only a single landmark, and can be navigated in a large area by serial landmarks. The simple configuration and the convenient material of the landmark increased the system's flexibility and decreased the complexity of the algorithm. From the experimental results it can be concluded that the navigation system has fast navigation speed and high navigation accuracy.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "To save time, improve flexibility and guarantee accuracy of the navigation system of the mobile robots, a multi-function landmark and a new navigation algorithm are proposed in this paper. With the new algorithm and landmark the mobile robots can be located by only a single landmark, and can be navigated in a large area by serial landmarks. The simple configuration and the convenient material of the landmark increased the system's flexibility and decreased the complexity of the algorithm. From the experimental results it can be concluded that the navigation system has fast navigation speed and high navigation accuracy.",
"fno": "3736e269",
"keywords": [
"Landmark Design",
"Navigation Algorithm",
"Mobile Robots"
],
"authors": [
{
"affiliation": null,
"fullName": "Dan Wang",
"givenName": "Dan",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yunwei Jia",
"givenName": "Yunwei",
"surname": "Jia",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Zuoliang Cao",
"givenName": "Zuoliang",
"surname": "Cao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icnc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-08-01T00:00:00",
"pubType": "proceedings",
"pages": "269-273",
"year": "2009",
"issn": null,
"isbn": "978-0-7695-3736-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3736e257",
"articleId": "12OmNAWH9F1",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3736e274",
"articleId": "12OmNASraRq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/enc/2009/3882/0/3882a155",
"title": "Mobile Robots Navigation in Industrial Environments",
"doi": null,
"abstractUrl": "/proceedings-article/enc/2009/3882a155/12OmNB1eJxv",
"parentPublication": {
"id": "proceedings/enc/2009/3882/0",
"title": "2009 Mexican International Conference on Computer Science",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isdea/2012/4608/0/4608a665",
"title": "Artificial Landmark Positioning System Using Omnidirectional Vision for Agricultural Vehicle Navigation",
"doi": null,
"abstractUrl": "/proceedings-article/isdea/2012/4608a665/12OmNrYlmR8",
"parentPublication": {
"id": "proceedings/isdea/2012/4608/0",
"title": "2012 Second International Conference on Intelligent System Design and Engineering Application",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/apwc-on-cse/2014/1955/0/07053847",
"title": "Landmark aided navigation of a point-mass robot via Lyapunov-based control scheme",
"doi": null,
"abstractUrl": "/proceedings-article/apwc-on-cse/2014/07053847/12OmNvwTGEu",
"parentPublication": {
"id": "proceedings/apwc-on-cse/2014/1955/0",
"title": "2014 Asia-Pacific World Congress on Computer Science and Engineering (APWC on CSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2011/289/1/05750686",
"title": "An Artificial Landmark Design Based on Mobile Robot Localization and Navigation",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2011/05750686/12OmNx5piSx",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isdea/2012/4608/0/4608a700",
"title": "Landmark-Based Localization for Indoor Mobile Robots with Stereo Vision",
"doi": null,
"abstractUrl": "/proceedings-article/isdea/2012/4608a700/12OmNyo1o02",
"parentPublication": {
"id": "proceedings/isdea/2012/4608/0",
"title": "2012 Second International Conference on Intelligent System Design and Engineering Application",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiccsa/2016/4320/0/07945766",
"title": "Semantic trajectory applied to the navigation of autonomous mobile robots",
"doi": null,
"abstractUrl": "/proceedings-article/aiccsa/2016/07945766/12OmNyoAA3b",
"parentPublication": {
"id": "proceedings/aiccsa/2016/4320/0",
"title": "2016 IEEE/ACS 13th International Conference of Computer Systems and Applications (AICCSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iros/1995/7108/2/71082150",
"title": "Navigation system based on ceiling landmark recognition for autonomous mobile robot-landmark detection based on fuzzy template matching (FTM)",
"doi": null,
"abstractUrl": "/proceedings-article/iros/1995/71082150/12OmNyugz3B",
"parentPublication": {
"id": "proceedings/iros/1995/7108/2",
"title": "Intelligent Robots and Systems, IEEE/RSJ International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1990/2062/1/00118059",
"title": "Experiments in autonomous navigation",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1990/00118059/12OmNzayNd6",
"parentPublication": {
"id": "proceedings/icpr/1990/2062/1",
"title": "Proceedings 10th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1988/0852/0/00012227",
"title": "Landmark recognition for autonomous mobile robots",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1988/00012227/12OmNzd7bVE",
"parentPublication": {
"id": "proceedings/robot/1988/0852/0",
"title": "Proceedings. 1988 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcabes/2019/2865/0/286500a214",
"title": "Design and Implementation of ROS-Based Autonomous Mobile Robot Positioning and Navigation System",
"doi": null,
"abstractUrl": "/proceedings-article/dcabes/2019/286500a214/1fHln6GCCXK",
"parentPublication": {
"id": "proceedings/dcabes/2019/2865/0",
"title": "2019 18th International Symposium on Distributed Computing and Applications for Business Engineering and Science (DCABES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
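The abstract above claims localization from a single landmark but gives no formulas. The standard geometry behind such a claim: if one observation recovers the landmark's position and orientation in the robot frame, and the landmark's world pose is known from the map, the robot's pose follows in closed form. A sketch under those assumptions; the function and parameter names are illustrative, not from the paper:

```python
import math

def robot_pose_from_landmark(landmark_world, landmark_in_robot):
    """Recover the robot's world pose (x, y, theta) from one landmark sighting.

    landmark_world    : (lx, ly, ltheta) known pose of the landmark on the map
    landmark_in_robot : (dx, dy, alpha)  measured pose of the landmark in the
                        robot frame (e.g. decoded from the landmark pattern)
    """
    lx, ly, ltheta = landmark_world
    dx, dy, alpha = landmark_in_robot
    theta = ltheta - alpha                  # landmark world heading = theta + alpha
    c, s = math.cos(theta), math.sin(theta)
    # landmark world position = robot position + R(theta) @ (dx, dy),
    # so solve for the robot position:
    x = lx - (c * dx - s * dy)
    y = ly - (s * dx + c * dy)
    return x, y, theta
```

Chaining such fixes across a series of landmarks is what lets a robot be navigated over a large area, as the abstract describes.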
{
"proceeding": {
"id": "1CJbEwHHqEg",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJbS2QCX5e",
"doi": "10.1109/VR51125.2022.00040",
"title": "Evaluating Visual Cues for Future Airborne Surveillance Using Simulated Augmented Reality Displays",
"normalizedTitle": "Evaluating Visual Cues for Future Airborne Surveillance Using Simulated Augmented Reality Displays",
"abstract": "This work explores the interaction between Augmented Reality (AR) and eye accommodation for airborne surveillance by simulating AR environments in Virtual Reality (VR). We simulate the AR display as displays with the capabilities needed for airborne surveillance are limited and because it would be hazardous to experiment directly on surveillance aircraft. While there is precedent for simulating AR in a VR environment, our study account for two of the physical and physiological aspects of AR: we factor in the focal plane of the AR technology and simulate the eye accommodation reflex of the user to provide focus. We ran a study with 24 participants examining AR cues to support visual search. We also compare the effects of having secondary tasks (that surveillance operators are normally responsible for) directly on the observation window using AR. Our results show that the effectiveness of the AR cues is dependent on the modality of the secondary task. We also found that, under certain situations, operators’ performances for the search task are improved if the focal plane of the AR display is at the same distance as subsequent search targets.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This work explores the interaction between Augmented Reality (AR) and eye accommodation for airborne surveillance by simulating AR environments in Virtual Reality (VR). We simulate the AR display as displays with the capabilities needed for airborne surveillance are limited and because it would be hazardous to experiment directly on surveillance aircraft. While there is precedent for simulating AR in a VR environment, our study account for two of the physical and physiological aspects of AR: we factor in the focal plane of the AR technology and simulate the eye accommodation reflex of the user to provide focus. We ran a study with 24 participants examining AR cues to support visual search. We also compare the effects of having secondary tasks (that surveillance operators are normally responsible for) directly on the observation window using AR. Our results show that the effectiveness of the AR cues is dependent on the modality of the secondary task. We also found that, under certain situations, operators’ performances for the search task are improved if the focal plane of the AR display is at the same distance as subsequent search targets.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This work explores the interaction between Augmented Reality (AR) and eye accommodation for airborne surveillance by simulating AR environments in Virtual Reality (VR). We simulate the AR display as displays with the capabilities needed for airborne surveillance are limited and because it would be hazardous to experiment directly on surveillance aircraft. While there is precedent for simulating AR in a VR environment, our study account for two of the physical and physiological aspects of AR: we factor in the focal plane of the AR technology and simulate the eye accommodation reflex of the user to provide focus. We ran a study with 24 participants examining AR cues to support visual search. We also compare the effects of having secondary tasks (that surveillance operators are normally responsible for) directly on the observation window using AR. Our results show that the effectiveness of the AR cues is dependent on the modality of the secondary task. We also found that, under certain situations, operators’ performances for the search task are improved if the focal plane of the AR display is at the same distance as subsequent search targets.",
"fno": "961700a213",
"keywords": [
"Augmented Reality",
"Data Visualisation",
"Eye",
"Surveillance",
"User Interfaces",
"Virtual Reality",
"Visual Cues",
"Future Airborne Surveillance",
"Simulated Augmented Reality Displays",
"AR Environments",
"Virtual Reality",
"AR Display",
"Surveillance Aircraft",
"VR Environment",
"Study Account",
"Physical Aspects",
"Physiological Aspects",
"Focal Plane",
"Eye Accommodation Reflex",
"AR Cues",
"Visual Search",
"Secondary Task",
"Surveillance Operators",
"Visualization",
"Three Dimensional Displays",
"Surveillance",
"User Interfaces",
"Physiology",
"Time Factors",
"Sea Level",
"Human Centered Computing X 2014 Human Computer Interaction HCI X 2014 Empirical Studies In HCI",
"Human Centered Computing X 2014 Visualization X 2014 Visualization Design And Evaluation Methods"
],
"authors": [
{
"affiliation": "University of South Australia",
"fullName": "Nicolas Barbotin",
"givenName": "Nicolas",
"surname": "Barbotin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of South Australia",
"fullName": "James Baumeister",
"givenName": "James",
"surname": "Baumeister",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of South Australia",
"fullName": "Andrew Cunningham",
"givenName": "Andrew",
"surname": "Cunningham",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "IMT Atlantique",
"fullName": "Thierry Duval",
"givenName": "Thierry",
"surname": "Duval",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "IMT Atlantique",
"fullName": "Olivier Grisvard",
"givenName": "Olivier",
"surname": "Grisvard",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of South Australia",
"fullName": "Bruce H. Thomas",
"givenName": "Bruce H.",
"surname": "Thomas",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "213-221",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-9617-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1CJbRSEugpy",
"name": "pvr202296170-09756788s1-mm_961700a213.zip",
"size": "79 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvr202296170-09756788s1-mm_961700a213.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "961700a205",
"articleId": "1CJbOUWTweQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "961700a222",
"articleId": "1CJc05Lu2LS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icppw/2012/4795/0/4795a173",
"title": "UAS Cloud Surveillance System",
"doi": null,
"abstractUrl": "/proceedings-article/icppw/2012/4795a173/12OmNBv2CkM",
"parentPublication": {
"id": "proceedings/icppw/2012/4795/0",
"title": "2012 41st International Conference on Parallel Processing Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2011/0529/0/05981777",
"title": "Robust camera calibration tool for video surveillance camera in urban environment",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2011/05981777/12OmNqJHFKI",
"parentPublication": {
"id": "proceedings/cvprw/2011/0529/0",
"title": "CVPR 2011 WORKSHOPS",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1992/2910/0/00201586",
"title": "Ground and airborne localization over rough terrain using random environmental range-measurements",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1992/00201586/12OmNwGqBpo",
"parentPublication": {
"id": "proceedings/icpr/1992/2910/0",
"title": "1992 11th IAPR International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1992/2855/0/00223174",
"title": "Computational ground and airborne localization over rough terrain",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1992/00223174/12OmNxbEtNH",
"parentPublication": {
"id": "proceedings/cvpr/1992/2855/0",
"title": "Proceedings 1992 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/avss/2011/0844/0/06027387",
"title": "New concepts in Airborne and Ground Surveillance Systems",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2011/06027387/12OmNyNQSIi",
"parentPublication": {
"id": "proceedings/avss/2011/0844/0",
"title": "2011 8th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aero/2011/7350/0/05747374",
"title": "Integrated predictive surveillance service through datalink",
"doi": null,
"abstractUrl": "/proceedings-article/aero/2011/05747374/12OmNyRg4ui",
"parentPublication": {
"id": "proceedings/aero/2011/7350/0",
"title": "IEEE Aerospace Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icctd/2009/3892/2/3892b350",
"title": "Camera Auto-Calibration Based on Motion Detection for Airborne Traffic Surveillance",
"doi": null,
"abstractUrl": "/proceedings-article/icctd/2009/3892b350/12OmNzFMFr9",
"parentPublication": {
"id": "proceedings/icctd/2009/3892/2",
"title": "Computer Technology and Development, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/07/07226865",
"title": "Resolving the Vergence-Accommodation Conflict in Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2016/07/07226865/13rRUxASuhD",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iceert/2021/3817/0/381700a224",
"title": "Analysis on the Selection and Maintenance of Shipborne Video Surveillance System",
"doi": null,
"abstractUrl": "/proceedings-article/iceert/2021/381700a224/1A3jj9XvKyQ",
"parentPublication": {
"id": "proceedings/iceert/2021/3817/0",
"title": "2021 International Conference on Information Control, Electrical Engineering and Rail Transit (ICEERT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2022/9548/0/954800a246",
"title": "Comparison of Virtual-Real Integration Efficiency between Light Field and Conventional Near-Eye AR Displays",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2022/954800a246/1GvditqC14Q",
"parentPublication": {
"id": "proceedings/mipr/2022/9548/0",
"title": "2022 IEEE 5th International Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
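The study above simulates the eye's accommodation response rather than stating a formula. A common first-order model from the optics and vergence-accommodation literature (not necessarily the one used in the study) is that the angular blur of a point grows as the pupil diameter times the defocus, in diopters, between where the eye is focused and where the content sits. A sketch of that model; the default pupil diameter is an assumed typical value:

```python
def defocus_blur_radians(focus_dist_m: float, target_dist_m: float,
                         pupil_diameter_m: float = 4e-3) -> float:
    """First-order angular blur (radians) of a target seen while the eye
    accommodates at focus_dist_m: pupil diameter times defocus in diopters.
    """
    defocus_diopters = abs(1.0 / focus_dist_m - 1.0 / target_dist_m)
    return pupil_diameter_m * defocus_diopters

# e.g. an AR focal plane at 2 m while the operator fixates a target at 50 m:
# defocus_blur_radians(50.0, 2.0) ~= 0.004 * 0.48 ~= 1.9e-3 rad (~0.11 deg)
```

Under this model the blur vanishes when the AR focal plane and the search target sit at the same distance, consistent with the performance benefit the abstract reports for matched focal distances.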
{
"proceeding": {
"id": "1CJcAaH6aYg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJcTykeypq",
"doi": "10.1109/VRW55335.2022.00316",
"title": "[DC] Leveraging AR Cues towards New Navigation Assistant Paradigm",
"normalizedTitle": "[DC] Leveraging AR Cues towards New Navigation Assistant Paradigm",
"abstract": "Extensive research has shown that the knowledge required to navigate an unfamiliar environment has been greatly reduced as many of the planning and decision-making tasks can be supplanted by the use of automated navigation systems. The progress in augmented reality (AR), particularly AR head-mounted displays (HMDs) foreshadows the prevalence of such devices as computational platforms of the future. AR displays open a new design space on navigational aids for solving this problem by superimposing virtual imagery over the environment. This dissertation abstract proposes a research agenda that investigates how to effectively leverage AR cues to help both navigation efficiency and spatial learning in walking scenarios.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Extensive research has shown that the knowledge required to navigate an unfamiliar environment has been greatly reduced as many of the planning and decision-making tasks can be supplanted by the use of automated navigation systems. The progress in augmented reality (AR), particularly AR head-mounted displays (HMDs) foreshadows the prevalence of such devices as computational platforms of the future. AR displays open a new design space on navigational aids for solving this problem by superimposing virtual imagery over the environment. This dissertation abstract proposes a research agenda that investigates how to effectively leverage AR cues to help both navigation efficiency and spatial learning in walking scenarios.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Extensive research has shown that the knowledge required to navigate an unfamiliar environment has been greatly reduced as many of the planning and decision-making tasks can be supplanted by the use of automated navigation systems. The progress in augmented reality (AR), particularly AR head-mounted displays (HMDs) foreshadows the prevalence of such devices as computational platforms of the future. AR displays open a new design space on navigational aids for solving this problem by superimposing virtual imagery over the environment. This dissertation abstract proposes a research agenda that investigates how to effectively leverage AR cues to help both navigation efficiency and spatial learning in walking scenarios.",
"fno": "840200a930",
"keywords": [
"Augmented Reality",
"Helmet Mounted Displays",
"Navigation",
"Virtual Imagery",
"Research Agenda",
"Leverage AR Cues",
"Navigation Efficiency",
"Unfamiliar Environment",
"Decision Making Tasks",
"Automated Navigation Systems",
"Augmented Reality",
"AR Head Mounted Displays",
"HMD",
"Computational Platforms",
"AR Displays",
"Design Space",
"AR Cue Leveraging",
"Navigation Assistant Paradigm",
"Training",
"Three Dimensional Displays",
"Software Design",
"Navigation",
"Shape",
"Conferences",
"Virtual Environments"
],
"authors": [
{
"affiliation": "Vanderbilt University,Department of Computer Science",
"fullName": "Yu Zhao",
"givenName": "Yu",
"surname": "Zhao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "930-931",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8402-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "840200a928",
"articleId": "1CJdRhDCDTO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "840200a932",
"articleId": "1CJes1FnQLm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icoin/2018/2290/0/08343267",
"title": "Immersive gesture interfaces for 3D map navigation in HMD-based virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/icoin/2018/08343267/12OmNvD8Rwt",
"parentPublication": {
"id": "proceedings/icoin/2018/2290/0",
"title": "2018 International Conference on Information Networking (ICOIN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2016/4149/0/4149a095",
"title": "A Study in Virtual Navigation Cues for Forklift Operators",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2016/4149a095/12OmNwlqhJO",
"parentPublication": {
"id": "proceedings/svr/2016/4149/0",
"title": "2016 XVIII Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2015/9403/0/9403a069",
"title": "A User-Perspective View for Mobile AR Systems Using Discrete Depth Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2015/9403a069/12OmNyOq51Q",
"parentPublication": {
"id": "proceedings/cw/2015/9403/0",
"title": "2015 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2005/02/u2080",
"title": "Navigation with Auditory Cues in a Virtual Environment",
"doi": null,
"abstractUrl": "/magazine/mu/2005/02/u2080/13rRUwInvi2",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/12/08123949",
"title": "Efficient VR and AR Navigation Through Multiperspective Occlusion Management",
"doi": null,
"abstractUrl": "/journal/tg/2018/12/08123949/14H4WNoi7Yc",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a952",
"title": "[DC] Effects of Asymmetric Locomotion Methods on Collaborative Navigation and Wayfinding in Shared Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a952/1CJfs97XQhq",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049687",
"title": "Evaluating Augmented Reality Landmark Cues and Frame of Reference Displays with Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049687/1KYovfHOQG4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2020/05/09151179",
"title": "Two is Better Than One. Improved Attention Guiding in AR by Combining Techniques",
"doi": null,
"abstractUrl": "/magazine/cg/2020/05/09151179/1lPCzce8nQI",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a731",
"title": "[DC] SharpView AR: Enhanced Visual Acuity for Out-of-Focus Virtual Content",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a731/1tnXdLvN92o",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2021/4065/0/406500a147",
"title": "Supporting Vine Vegetation Status Observation Using AR",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2021/406500a147/1yBF2DRa37y",
"parentPublication": {
"id": "proceedings/cw/2021/4065/0",
"title": "2021 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jIxhEnA8IE",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxkn3znZC",
"doi": "10.1109/VRW50115.2020.00165",
"title": "Map Displays And Landmark Effects On Wayfinding In Unfamiliar Environments",
"normalizedTitle": "Map Displays And Landmark Effects On Wayfinding In Unfamiliar Environments",
"abstract": "In this work, we investigated the effect of map presentations and landmarks on wayfinding performance. We carried out an experiment in virtual reality, participants were asked to navigate inside a 3D environment to find targets shown on the maps. We studied two kinds of maps: Skymap, a world-scale, and world-aligned head-up map and a track-up bird’s eye view map. Results showed that neither SkyMap nor landmarks did improve target finding performances. In fact, participants performed better with the track-up map.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this work, we investigated the effect of map presentations and landmarks on wayfinding performance. We carried out an experiment in virtual reality, participants were asked to navigate inside a 3D environment to find targets shown on the maps. We studied two kinds of maps: Skymap, a world-scale, and world-aligned head-up map and a track-up bird’s eye view map. Results showed that neither SkyMap nor landmarks did improve target finding performances. In fact, participants performed better with the track-up map.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this work, we investigated the effect of map presentations and landmarks on wayfinding performance. We carried out an experiment in virtual reality, participants were asked to navigate inside a 3D environment to find targets shown on the maps. We studied two kinds of maps: Skymap, a world-scale, and world-aligned head-up map and a track-up bird’s eye view map. Results showed that neither SkyMap nor landmarks did improve target finding performances. In fact, participants performed better with the track-up map.",
"fno": "09090399",
"keywords": [
"Navigation",
"Target Tracking",
"Three Dimensional Displays",
"Visualization",
"Augmented Reality",
"Human Centered Computing",
"Visualization",
"Visualization Design And Evaluation Methods"
],
"authors": [
{
"affiliation": "University of Toronto",
"fullName": "Sabah Boustila",
"givenName": "Sabah",
"surname": "Boustila",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Toronto",
"fullName": "Paul Milgram",
"givenName": "Paul",
"surname": "Milgram",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Toronto",
"fullName": "Greg A. Jamieson",
"givenName": "Greg A.",
"surname": "Jamieson",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "628-629",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6532-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09090641",
"articleId": "1jIxtkAPJi8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09090580",
"articleId": "1jIxns5TwxG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wcecs/2008/3555/0/3555a219",
"title": "An e-Map Navigation System: Provide Region Search and Visualize Landmark Information",
"doi": null,
"abstractUrl": "/proceedings-article/wcecs/2008/3555a219/12OmNC8MsKF",
"parentPublication": {
"id": "proceedings/wcecs/2008/3555/0",
"title": "World Congress on Engineering and Computer Science, Advances in Electrical and Electronics Engineering - IAENG Special Edition of the",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-amh/2010/9339/0/05643292",
"title": "AR-enabled wayfinding kiosk",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-amh/2010/05643292/12OmNqG0SYj",
"parentPublication": {
"id": "proceedings/ismar-amh/2010/9339/0",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality - Arts, Media, and Humanities",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2008/2047/0/04476608",
"title": "Poster: Evaluation of Wayfinding Aid Techniques in Multi-Level Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2008/04476608/12OmNsdo6rt",
"parentPublication": {
"id": "proceedings/3dui/2008/2047/0",
"title": "2008 IEEE Symposium on 3D User Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aina/2018/2195/0/219501a488",
"title": "Wayfinding Behavior Detection by Smartphone",
"doi": null,
"abstractUrl": "/proceedings-article/aina/2018/219501a488/12OmNxWcH87",
"parentPublication": {
"id": "proceedings/aina/2018/2195/0",
"title": "2018 IEEE 32nd International Conference on Advanced Information Networking and Applications (AINA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2008/1971/0/04480745",
"title": "Envisor: Online Environment Map Construction for Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2008/04480745/12OmNyFCvTI",
"parentPublication": {
"id": "proceedings/vr/2008/1971/0",
"title": "IEEE Virtual Reality 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iswc/2004/2186/0/21860132",
"title": "My Own Private Kiosk: Privacy-Preserving Public Displays",
"doi": null,
"abstractUrl": "/proceedings-article/iswc/2004/21860132/12OmNz5JC4E",
"parentPublication": {
"id": "proceedings/iswc/2004/2186/0",
"title": "Eighth International Symposium on Wearable Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2002/1695/2/169520693",
"title": "Concurrent Map Building and Localization with Landmark Validation",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2002/169520693/12OmNzayNu8",
"parentPublication": {
"id": "proceedings/icpr/2002/1695/2",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icghit/2019/0627/0/062700a007",
"title": "Indoor Positioning System using Sensor and Crowdsourcing Landmark Map Update",
"doi": null,
"abstractUrl": "/proceedings-article/icghit/2019/062700a007/1e5ZeaqFMK4",
"parentPublication": {
"id": "proceedings/icghit/2019/0627/0",
"title": "2019 International Conference on Green and Human Information Technology (ICGHIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2019/5434/0/543400a102",
"title": "Use of Augmented Reality for Computational Thinking Stimulation through Virtual",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2019/543400a102/1fHjxdp8Yvu",
"parentPublication": {
"id": "proceedings/svr/2019/5434/0",
"title": "2019 21st Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/12/09123548",
"title": "Tilt Map: Interactive Transitions Between Choropleth Map, Prism Map and Bar Chart in Immersive Environments",
"doi": null,
"abstractUrl": "/journal/tg/2021/12/09123548/1kTxx2qV6zm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
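The study above contrasts a world-aligned overhead map with a track-up map; the difference between the two is just the frame the map is drawn in. As an illustration (not code from the paper), converting a world point into track-up map coordinates translates by the viewer's position and rotates by the viewer's heading so that "forward" always points up; the heading convention below is an assumption:

```python
import math

def world_to_trackup(point_xy, viewer_xy, heading_rad):
    """Track-up map transform: translate so the viewer is the origin, then
    rotate so the viewer's heading (radians, CCW from +x) points up (+y).
    A world-aligned map would skip the rotation entirely.
    """
    dx = point_xy[0] - viewer_xy[0]
    dy = point_xy[1] - viewer_xy[1]
    phi = math.pi / 2.0 - heading_rad          # rotate the heading onto +y
    c, s = math.cos(phi), math.sin(phi)
    return (c * dx - s * dy, s * dx + c * dy)

# Sanity check: facing +x (heading 0), a point 1 m ahead maps to (0, 1) -- up.
assert world_to_trackup((1.0, 0.0), (0.0, 0.0), 0.0) == (0.0, 1.0)
```

Because the rotation tracks the viewer continuously, map-to-world alignment never has to be done mentally, which is one common explanation for the track-up advantage the study reports.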
{
"proceeding": {
"id": "1jIxhEnA8IE",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxyqb5Ali",
"doi": "10.1109/VRW50115.2020.00232",
"title": "Place in the World or Place on the Screen? Investigating the Effects of Augmented Reality Head -Up Display User Interfaces on Drivers’ Spatial Knowledge Acquisition and Glance Behavior",
"normalizedTitle": "Place in the World or Place on the Screen? Investigating the Effects of Augmented Reality Head -Up Display User Interfaces on Drivers’ Spatial Knowledge Acquisition and Glance Behavior",
"abstract": "When navigating via car, developing robust mental representations (spatial knowledge) of the environment is crucial in situations where technology fails, or we need to find locations not included in a navigation system’s database. In this work, we present a study that examines how screen-relative and world-relative augmented reality (AR) head-up display interfaces affect drivers’ glance behavior and spatial knowledge acquisition. Results showed that both AR interfaces have similar impact on the levels of spatial knowledge acquired. However, eye-tracking analyses showed fundamental differences in the way participants visually interacted with different AR interfaces; with conformal-graphics demanding more visual attention from drivers.",
"abstracts": [
{
"abstractType": "Regular",
"content": "When navigating via car, developing robust mental representations (spatial knowledge) of the environment is crucial in situations where technology fails, or we need to find locations not included in a navigation system’s database. In this work, we present a study that examines how screen-relative and world-relative augmented reality (AR) head-up display interfaces affect drivers’ glance behavior and spatial knowledge acquisition. Results showed that both AR interfaces have similar impact on the levels of spatial knowledge acquired. However, eye-tracking analyses showed fundamental differences in the way participants visually interacted with different AR interfaces; with conformal-graphics demanding more visual attention from drivers.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "When navigating via car, developing robust mental representations (spatial knowledge) of the environment is crucial in situations where technology fails, or we need to find locations not included in a navigation system’s database. In this work, we present a study that examines how screen-relative and world-relative augmented reality (AR) head-up display interfaces affect drivers’ glance behavior and spatial knowledge acquisition. Results showed that both AR interfaces have similar impact on the levels of spatial knowledge acquired. However, eye-tracking analyses showed fundamental differences in the way participants visually interacted with different AR interfaces; with conformal-graphics demanding more visual attention from drivers.",
"fno": "09090629",
"keywords": [
"Navigation",
"Vehicles",
"Visualization",
"Knowledge Acquisition",
"Head Up Displays",
"Roads",
"Augmented Reality",
"Head Up Display",
"Spatial Knowledge",
"Driving"
],
"authors": [
{
"affiliation": "Virginia Tech",
"fullName": "Nayara de Oliveira Faria",
"givenName": "Nayara de Oliveira",
"surname": "Faria",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Virginia Tech",
"fullName": "Joseph L. Gabbard",
"givenName": "Joseph L.",
"surname": "Gabbard",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Oakland University",
"fullName": "Missie Smith",
"givenName": "Missie",
"surname": "Smith",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "762-763",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6532-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09090480",
"articleId": "1jIxvICmsO4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09090523",
"articleId": "1jIxwcxCGXK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2017/2943/0/2943a136",
"title": "3D-FRC: Depiction of the future road course in the Head-Up-Display",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2017/2943a136/12OmNAo45DT",
"parentPublication": {
"id": "proceedings/ismar/2017/2943/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2010/4077/3/4077e662",
"title": "Survey of the AUTOSAR Complex Drivers in the Field of Automotive Electronics",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2010/4077e662/12OmNButpWy",
"parentPublication": {
"id": "proceedings/icicta/2010/4077/3",
"title": "Intelligent Computation Technology and Automation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948447",
"title": "[Poster] Towards mobile augmented reality for the elderly",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948447/12OmNxE2n1D",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ibica/2011/4606/0/4606a316",
"title": "Design and Implement Augmented Reality for Supporting Driving Visual Guidance",
"doi": null,
"abstractUrl": "/proceedings-article/ibica/2011/4606a316/12OmNzahbSj",
"parentPublication": {
"id": "proceedings/ibica/2011/4606/0",
"title": "Innovations in Bio-inspired Computing and Applications, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2006/0225/0/02250127",
"title": "A Survey of Challenges Related to the Design of 3D User Interfaces for Car Drivers",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2006/02250127/12OmNzxyiyi",
"parentPublication": {
"id": "proceedings/3dui/2006/0225/0",
"title": "3D User Interfaces (3DUI'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/11/08466859",
"title": "Augmented Reality Interface Design Approaches for Goal-directed and Stimulus-driven Driving Tasks",
"doi": null,
"abstractUrl": "/journal/tg/2018/11/08466859/14M3E5b55mM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049687",
"title": "Evaluating Augmented Reality Landmark Cues and Frame of Reference Displays with Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049687/1KYovfHOQG4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a541",
"title": "A study of the influence of AR on the perception, comprehension and projection levels of situation awareness",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a541/1MNgMgQsPjW",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090511",
"title": "Evaluating Automotive Augmented Reality Head-up Display Effects on Driver Performance and Distraction",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090511/1jIxviTG03C",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/bd/2022/05/09266753",
"title": "cGAIL: <underline>C</underline>onditional <underline>G</underline>enerative <underline>A</underline>dversarial <underline>I</underline>mitation <underline>L</underline>earning—An Application in Taxi Drivers’ Strategy Learning",
"doi": null,
"abstractUrl": "/journal/bd/2022/05/09266753/1oZxfybCbCw",
"parentPublication": {
"id": "trans/bd",
"title": "IEEE Transactions on Big Data",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxV4itF",
"title": "2017 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBTawwY",
"doi": "10.1109/VR.2017.7892315",
"title": "The effect of geometric realism on presence in a virtual reality game",
"normalizedTitle": "The effect of geometric realism on presence in a virtual reality game",
"abstract": "Previous research on visual realism and presence has not involved scenarios, graphics, and hardware representative of commercially available VR games. This poster details a between-subjects study (n=50) exploring if polygon count and texture resolution influence presence during exposure to a VR game. The results suggest that a higher polygon count and texture resolution increased presence as assessed by means of self-reports and physiological measures.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Previous research on visual realism and presence has not involved scenarios, graphics, and hardware representative of commercially available VR games. This poster details a between-subjects study (n=50) exploring if polygon count and texture resolution influence presence during exposure to a VR game. The results suggest that a higher polygon count and texture resolution increased presence as assessed by means of self-reports and physiological measures.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Previous research on visual realism and presence has not involved scenarios, graphics, and hardware representative of commercially available VR games. This poster details a between-subjects study (n=50) exploring if polygon count and texture resolution influence presence during exposure to a VR game. The results suggest that a higher polygon count and texture resolution increased presence as assessed by means of self-reports and physiological measures.",
"fno": "07892315",
"keywords": [
"Visualization",
"Games",
"Physiology",
"Virtual Environments",
"Atmospheric Measurements",
"Particle Measurements",
"I 3 7 Computer Graphics Three Dimensional Graphics And Realism Virtual Reality"
],
"authors": [
{
"affiliation": "Aalborg University Copenhagen",
"fullName": "Jonatan S. Hvass",
"givenName": "Jonatan S.",
"surname": "Hvass",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Aalborg University Copenhagen",
"fullName": "Oliver Larsen",
"givenName": "Oliver",
"surname": "Larsen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Aalborg University Copenhagen",
"fullName": "Kasper B. Vendelbo",
"givenName": "Kasper B.",
"surname": "Vendelbo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Aalborg University Copenhagen",
"fullName": "Niels C. Nilsson",
"givenName": "Niels C.",
"surname": "Nilsson",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Aalborg University Copenhagen",
"fullName": "Rolf Nordahl",
"givenName": "Rolf",
"surname": "Nordahl",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Aalborg University Copenhagen",
"fullName": "Stefania Serafin",
"givenName": "Stefania",
"surname": "Serafin",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-01-01T00:00:00",
"pubType": "proceedings",
"pages": "339-340",
"year": "2017",
"issn": "2375-5334",
"isbn": "978-1-5090-6647-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07892314",
"articleId": "12OmNxQOjwK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07892316",
"articleId": "12OmNCzb9vr",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2010/9343/0/05643560",
"title": "Experiences with an AR evaluation test bed: Presence, performance, and physiological measurement",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2010/05643560/12OmNCmGNZi",
"parentPublication": {
"id": "proceedings/ismar/2010/9343/0",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892288",
"title": "The impact of transitions on user experience in virtual reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892288/12OmNzUPptg",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504761",
"title": "Avatar realism and social interaction quality in virtual reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504761/12OmNzdoMvk",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a538",
"title": "CardsVR: A Two-Person VR Experience with Passive Haptic Feedback from a Deck of Playing Cards",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a538/1JrRaySJ7So",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049649",
"title": "Comparing the Effects of Visual Realism on Size Perception in VR versus Real World Viewing through Physical and Verbal Judgments",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049649/1KYolXflEWI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wevr/2019/4050/0/08809590",
"title": "A Qualitative Study on the Effects of Real-World Stimuli and Place Familiarity on Presence",
"doi": null,
"abstractUrl": "/proceedings-article/wevr/2019/08809590/1cI62tizh7y",
"parentPublication": {
"id": "proceedings/wevr/2019/4050/0",
"title": "2019 IEEE 5th Workshop on Everyday Virtual Reality (WEVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798203",
"title": "[DC] Case-studies of Contemporary Presence Theory: Towards More Objective and Reliable Measures of Presence",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798203/1cJ0NzKEIjS",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797977",
"title": "Entropy of Controller Movements Reflects Mental Workload in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797977/1cJ0OGvQaSQ",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797870",
"title": "The Influence of Body Position on Presence When Playing a Virtual Reality Game",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797870/1cJ0RyhQnC0",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a474",
"title": "A Neurophysiological Approach for Measuring Presence in Immersive Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a474/1pysuR65ESQ",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNvDI3MQ",
"title": "Computer Modeling and Simulation, International Conference on",
"acronym": "uksim",
"groupId": "1001885",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNz6Apbw",
"doi": "10.1109/UKSim.2012.32",
"title": "User Perception of the Physical & Behavioral Realism of a Maritime Virtual Reality Environment",
"normalizedTitle": "User Perception of the Physical & Behavioral Realism of a Maritime Virtual Reality Environment",
"abstract": "Perception enhanced real-time Virtual Reality (VR) applications are used in various fields such as education and entertainment. The physical and behavioral realism of such applications are important in different perspectives. We have developed a perception enhanced real-time VR solution for maritime applications such as naval training, water way designs and simulate military scenes. In this paper, we present brief description of a six degrees of freedom (6-DOF) real-time mathematical ship motion simulation model and validation techniques for physical/ behavioral realism of a perception enhanced maritime VR environment. The user perceived physical and behavioral realism of the VR solution is investigated with user tests and preliminary results are presented.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Perception enhanced real-time Virtual Reality (VR) applications are used in various fields such as education and entertainment. The physical and behavioral realism of such applications are important in different perspectives. We have developed a perception enhanced real-time VR solution for maritime applications such as naval training, water way designs and simulate military scenes. In this paper, we present brief description of a six degrees of freedom (6-DOF) real-time mathematical ship motion simulation model and validation techniques for physical/ behavioral realism of a perception enhanced maritime VR environment. The user perceived physical and behavioral realism of the VR solution is investigated with user tests and preliminary results are presented.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Perception enhanced real-time Virtual Reality (VR) applications are used in various fields such as education and entertainment. The physical and behavioral realism of such applications are important in different perspectives. We have developed a perception enhanced real-time VR solution for maritime applications such as naval training, water way designs and simulate military scenes. In this paper, we present brief description of a six degrees of freedom (6-DOF) real-time mathematical ship motion simulation model and validation techniques for physical/ behavioral realism of a perception enhanced maritime VR environment. The user perceived physical and behavioral realism of the VR solution is investigated with user tests and preliminary results are presented.",
"fno": "4682a172",
"keywords": [
"Virtual Reality",
"Physical Behavioral Realism",
"Validation Techniques",
"User Perception",
"Maritime VR Solution"
],
"authors": [
{
"affiliation": null,
"fullName": "Damitha Sandaruwan",
"givenName": "Damitha",
"surname": "Sandaruwan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Nihal Kodikara",
"givenName": "Nihal",
"surname": "Kodikara",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chamath Keppitiyagama",
"givenName": "Chamath",
"surname": "Keppitiyagama",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Rexy Rosa",
"givenName": "Rexy",
"surname": "Rosa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Mahen Jayawardena",
"givenName": "Mahen",
"surname": "Jayawardena",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Prabath Samarasinghe",
"givenName": "Prabath",
"surname": "Samarasinghe",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "uksim",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-03-01T00:00:00",
"pubType": "proceedings",
"pages": "172-178",
"year": "2012",
"issn": null,
"isbn": "978-0-7695-4682-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4682a165",
"articleId": "12OmNqI04TB",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4682a181",
"articleId": "12OmNvqEvJk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2000/0478/0/04780091",
"title": "Enhancing Fish Tank VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2000/04780091/12OmNAY79hZ",
"parentPublication": {
"id": "proceedings/vr/2000/0478/0",
"title": "Virtual Reality Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2011/0039/0/05759435",
"title": "Improving the realism in motion-based driving simulators by adapting tilt-translation technique to human perception",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2011/05759435/12OmNweBUR8",
"parentPublication": {
"id": "proceedings/vr/2011/0039/0",
"title": "2011 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2009/3789/0/3789a459",
"title": "Interactive Visualization of Normal Behavioral Models and Expert Rules for Maritime Anomaly Detection",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2009/3789a459/12OmNx7G5Vj",
"parentPublication": {
"id": "proceedings/cgiv/2009/3789/0",
"title": "2009 Sixth International Conference on Computer Graphics, Imaging and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391d943",
"title": "Learning a Discriminative Model for the Perception of Realism in Composite Images",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391d943/12OmNxAlA9f",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2018/09/08022957",
"title": "Image Visual Realism: From Human Perception to Machine Computation",
"doi": null,
"abstractUrl": "/journal/tp/2018/09/08022957/13rRUwInvKP",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2018/9264/0/926400a297",
"title": "Factors Influencing the Perception of Realism in Synthetic Facial Expressions",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2018/926400a297/17D45We0UDD",
"parentPublication": {
"id": "proceedings/sibgrapi/2018/9264/0",
"title": "2018 31st SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a884",
"title": "Studying the Effect of Physical Realism on Time Perception in a HAZMAT VR Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a884/1CJeHh7xkYw",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049649",
"title": "Comparing the Effects of Visual Realism on Size Perception in VR versus Real World Viewing through Physical and Verbal Judgments",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049649/1KYolXflEWI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798040",
"title": "Virtual Hand Realism Affects Object Size Perception in Body-Based Scaling",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798040/1cJ14CI2Jsk",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a041",
"title": "Effects of Behavioral and Anthropomorphic Realism on Social Influence with Virtual Humans in AR",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a041/1pBMhSuBHpe",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |